| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 81 to 54k) | int64 (0 to 721) | string (length 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
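A minimal usage sketch for the processor above. The checkpoint name "facebook/s2t-small-librispeech-asr" is a real Speech2Text checkpoint but is not part of the original file, and the dummy 16 kHz waveform is an illustrative assumption.

# Usage sketch (assumes `transformers`, `numpy`, and network access to the checkpoint).
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
waveform = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz

# Passing both `audio` and `text` returns the audio features plus tokenized labels
# (typically keys like "input_features", "attention_mask", and "labels").
inputs = processor(audio=waveform, sampling_rate=16000, text="hello world")
print(inputs.keys())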
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : int = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'beit'
def __init__(self, lowerCamelCase_=8_1_9_2, lowerCamelCase_=7_6_8, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=2_2_4, lowerCamelCase_=1_6, lowerCamelCase_=3, lowerCamelCase_=False, lowerCamelCase_=False, lowerCamelCase_=False, lowerCamelCase_=False, lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=True, lowerCamelCase_=[3, 5, 7, 1_1], lowerCamelCase_=[1, 2, 3, 6], lowerCamelCase_=True, lowerCamelCase_=0.4, lowerCamelCase_=2_5_6, lowerCamelCase_=1, lowerCamelCase_=False, lowerCamelCase_=2_5_5, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : Union[str, Any] = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] = initializer_range
lowerCamelCase__ : List[str] = layer_norm_eps
lowerCamelCase__ : Optional[Any] = image_size
lowerCamelCase__ : Union[str, Any] = patch_size
lowerCamelCase__ : Any = num_channels
lowerCamelCase__ : Union[str, Any] = use_mask_token
lowerCamelCase__ : Any = use_absolute_position_embeddings
lowerCamelCase__ : Union[str, Any] = use_relative_position_bias
lowerCamelCase__ : Optional[Any] = use_shared_relative_position_bias
lowerCamelCase__ : List[Any] = layer_scale_init_value
lowerCamelCase__ : Union[str, Any] = drop_path_rate
lowerCamelCase__ : int = use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCamelCase__ : List[str] = out_indices
lowerCamelCase__ : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCamelCase__ : List[Any] = use_auxiliary_head
lowerCamelCase__ : int = auxiliary_loss_weight
lowerCamelCase__ : Optional[Any] = auxiliary_channels
lowerCamelCase__ : List[Any] = auxiliary_num_convs
lowerCamelCase__ : List[str] = auxiliary_concat_input
lowerCamelCase__ : List[Any] = semantic_loss_ignore_index
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = version.parse('1.11' )
@property
def a__ (self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def a__ (self ):
'''simple docstring'''
return 1e-4
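A short sketch of how a config class like the one above is typically used; the override values are arbitrary and the model class follows the usual `transformers` naming pattern.

# Sketch: the defaults reproduce the base BEiT architecture; any field can be overridden.
from transformers import BeitConfig, BeitModel  # assumes a transformers install that ships BEiT

config = BeitConfig(image_size=384, drop_path_rate=0.2)
model = BeitModel(config)  # randomly initialised weights with the chosen architecture
print(config.num_hidden_layers, config.image_size)  # 12 384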
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
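The relative-tolerance trick used in the integration test above is easy to demonstrate on plain tensors; this standalone sketch is not part of the test file.

# Sketch: an element-wise ratio check works uniformly across magnitudes from 1e-1 to 1e8,
# where an absolute (additive) comparison would need a different tolerance per element.
import torch

TOLERANCE = 1e-3
expected = torch.tensor([1.0e8, 2.5e0, -1.7e-1])
actual = expected * (1 + 5e-4)  # every element agrees to within 0.05%

ratio = expected / actual
print(torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE))  # tensor(True)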
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class a_ ( snake_case_ ):
'''simple docstring'''
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
lowerCamelCase__ : Union[str, Any] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
lowerCamelCase__ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
lowerCamelCase__ : Optional[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCamelCase_ )
BertModel.from_pretrained(lowerCamelCase_ )
BertTokenizer.from_pretrained(lowerCamelCase_ )
pipeline(task='fill-mask', model=lowerCamelCase_ )
# baseline - just load from_pretrained with normal network
lowerCamelCase__ : Dict = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
lowerCamelCase__ : Any = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase__ : List[Any] = '1'
lowerCamelCase__ : List[Any] = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
lowerCamelCase__ : Dict = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
lowerCamelCase__ : Union[str, Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
lowerCamelCase__ : Optional[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCamelCase_ )
BertModel.from_pretrained(lowerCamelCase_ )
BertTokenizer.from_pretrained(lowerCamelCase_ )
pipeline(task='fill-mask', model=lowerCamelCase_ )
# baseline - just load from_pretrained with normal network
lowerCamelCase__ : List[Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
lowerCamelCase__ : Union[str, Any] = self.get_env()
lowerCamelCase__ : List[Any] = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
lowerCamelCase__ : int = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
lowerCamelCase__ : List[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
lowerCamelCase__ : List[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
lowerCamelCase__ : List[Any] = self.get_env()
lowerCamelCase__ : int = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
# next emulate no network
lowerCamelCase__ : Optional[int] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase__ : List[str] = '1'
lowerCamelCase__ : str = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = '\nfrom transformers import pipeline\n '
lowerCamelCase__ : int = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
lowerCamelCase__ : Union[str, Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
lowerCamelCase__ : List[str] = self.get_env()
lowerCamelCase__ : List[str] = '1'
lowerCamelCase__ : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
lowerCamelCase__ : Any = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 1, result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode', result.stderr.decode().replace('\n', '' ), )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = '\nfrom transformers import AutoModel\n '
lowerCamelCase__ : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
lowerCamelCase__ : List[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
lowerCamelCase__ : int = self.get_env()
lowerCamelCase__ : Optional[Any] = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase__ : List[str] = '1'
lowerCamelCase__ : Optional[int] = subprocess.run(lowerCamelCase_, env=lowerCamelCase_, check=lowerCamelCase_, capture_output=lowerCamelCase_ )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('success', result.stdout.decode() )
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
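The effect of `remove_last_block` is easiest to see on a toy generation; this self-contained sketch only redefines the `EOF_STRINGS` list from above.

# Standalone sketch of remove_last_block's behaviour: re.split with a capturing group keeps
# the separators, and dropping the final two pieces removes the last (incomplete) block.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
generation = "    return a + b\nclass Garbage:\n    pass"

pieces = re.split("(%s)" % "|".join(EOF_STRINGS), generation)
# pieces == ['    return a + b', '\nclass', ' Garbage:\n    pass']
truncated = "".join(pieces[:-2])
print(repr(truncated))  # '    return a + b' -- the trailing block after the last marker is gone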
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 'transfo-xl'
lowerCamelCase__ : str = ['mems']
lowerCamelCase__ : int = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self, lowerCamelCase_=2_6_7_7_3_5, lowerCamelCase_=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0], lowerCamelCase_=1_0_2_4, lowerCamelCase_=1_0_2_4, lowerCamelCase_=1_6, lowerCamelCase_=6_4, lowerCamelCase_=4_0_9_6, lowerCamelCase_=4, lowerCamelCase_=False, lowerCamelCase_=1_8, lowerCamelCase_=1_6_0_0, lowerCamelCase_=1_0_0_0, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=0, lowerCamelCase_=-1, lowerCamelCase_=True, lowerCamelCase_=0.1, lowerCamelCase_=0.0, lowerCamelCase_=True, lowerCamelCase_="normal", lowerCamelCase_=0.01, lowerCamelCase_=0.01, lowerCamelCase_=0.02, lowerCamelCase_=1e-5, lowerCamelCase_=0, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : Dict = []
self.cutoffs.extend(lowerCamelCase_ )
if proj_share_all_but_first:
lowerCamelCase__ : Dict = [False] + [True] * len(self.cutoffs )
else:
lowerCamelCase__ : Tuple = [False] + [False] * len(self.cutoffs )
lowerCamelCase__ : Union[str, Any] = d_model
lowerCamelCase__ : Optional[Any] = d_embed
lowerCamelCase__ : Optional[Any] = d_head
lowerCamelCase__ : Optional[int] = d_inner
lowerCamelCase__ : Optional[Any] = div_val
lowerCamelCase__ : List[str] = pre_lnorm
lowerCamelCase__ : List[Any] = n_layer
lowerCamelCase__ : Tuple = n_head
lowerCamelCase__ : Union[str, Any] = mem_len
lowerCamelCase__ : str = same_length
lowerCamelCase__ : Tuple = attn_type
lowerCamelCase__ : Union[str, Any] = clamp_len
lowerCamelCase__ : List[Any] = sample_softmax
lowerCamelCase__ : List[Any] = adaptive
lowerCamelCase__ : List[Any] = dropout
lowerCamelCase__ : int = dropatt
lowerCamelCase__ : Dict = untie_r
lowerCamelCase__ : Tuple = init
lowerCamelCase__ : Union[str, Any] = init_range
lowerCamelCase__ : Union[str, Any] = proj_init_std
lowerCamelCase__ : Union[str, Any] = init_std
lowerCamelCase__ : str = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
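A brief sketch of the sequence-length behaviour implemented by the property above; it assumes a `transformers` version that still ships Transformer-XL.

# Sketch: Transfo-XL has no hard positional limit, so the read-only property reports -1
# and assigning to it raises NotImplementedError.
from transformers import TransfoXLConfig

config = TransfoXLConfig()
print(config.max_position_embeddings)  # -1
try:
    config.max_position_embeddings = 512
except NotImplementedError as err:
    print(err)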
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
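These dummy classes exist only to fail loudly. A sketch of the failure mode, assuming the class name above and an environment where the `speech` backend (torchaudio) is not installed; with the backend present, instantiation simply succeeds silently.

# Sketch: instantiating a dummy object raises an ImportError naming the missing backend.
from transformers.utils import dummy_speech_objects

try:
    dummy_speech_objects.ASTFeatureExtractor()
except ImportError as err:
    print(err)  # explains that the `speech` backend must be installed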
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A_ : Optional[int] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def lowerCamelCase_ ( _lowerCamelCase=None ):
if subparsers is not None:
lowerCamelCase__ : Dict = subparsers.add_parser('tpu-config' , description=_description )
else:
lowerCamelCase__ : str = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
lowerCamelCase__ : str = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=_lowerCamelCase , default=_lowerCamelCase , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=_lowerCamelCase , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=_lowerCamelCase , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
lowerCamelCase__ : Optional[int] = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=_lowerCamelCase , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=_lowerCamelCase )
return parser
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : int = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowerCamelCase__ : int = defaults.command_file
if not args.command and defaults.commands is not None:
lowerCamelCase__ : List[str] = defaults.commands
if not args.tpu_name:
lowerCamelCase__ : List[Any] = defaults.tpu_name
if not args.tpu_zone:
lowerCamelCase__ : Any = defaults.tpu_zone
if args.accelerate_version == "dev":
lowerCamelCase__ : Any = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
lowerCamelCase__ : int = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , _lowerCamelCase ):
lowerCamelCase__ : List[str] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
lowerCamelCase__ : Any = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _lowerCamelCase ):
lowerCamelCase__ : Dict = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowerCamelCase__ : List[str] = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
lowerCamelCase__ : int = '; '.join(_lowerCamelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowerCamelCase__ : str = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {' '.join(_lowerCamelCase )}''' )
return
subprocess.run(_lowerCamelCase )
print('Successfully setup pod.' )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[int] = tpu_command_parser()
lowerCamelCase__ : str = parser.parse_args()
tpu_command_launcher(_lowerCamelCase )
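A dry-run sketch of the command builder above. The TPU name and zone are placeholder values, and the launcher assumes a default accelerate config file already exists on the machine (created via `accelerate config`), since it reads fallback values from it.

# Dry-run sketch: build the parser, then let --debug print the gcloud command without executing it.
from accelerate.commands.tpu import tpu_command_launcher, tpu_command_parser

parser = tpu_command_parser()
args = parser.parse_args(
    ["--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--command", "echo hello", "--debug"]
)
tpu_command_launcher(args)  # prints: Running gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a ...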
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
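A quick sanity check of the helpers above, run with them in scope: 10! = 3628800, whose digits sum to 27 (the answer for 100!, the actual Project Euler input, is 648).

# Sanity check for the helpers above.
assert factorial(10) == 3628800
assert split_and_add(3628800) == 3 + 6 + 2 + 8 + 8 + 0 + 0  # == 27
assert solution(10) == 27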
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Optional[int] = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 'van'
def __init__(self, lowerCamelCase_=2_2_4, lowerCamelCase_=3, lowerCamelCase_=[7, 3, 3, 3], lowerCamelCase_=[4, 2, 2, 2], lowerCamelCase_=[6_4, 1_2_8, 3_2_0, 5_1_2], lowerCamelCase_=[3, 3, 1_2, 3], lowerCamelCase_=[8, 8, 4, 4], lowerCamelCase_="gelu", lowerCamelCase_=0.02, lowerCamelCase_=1e-6, lowerCamelCase_=1e-2, lowerCamelCase_=0.0, lowerCamelCase_=0.0, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : Dict = patch_sizes
lowerCamelCase__ : Any = strides
lowerCamelCase__ : int = hidden_sizes
lowerCamelCase__ : List[str] = depths
lowerCamelCase__ : Dict = mlp_ratios
lowerCamelCase__ : Tuple = hidden_act
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = layer_scale_init_value
lowerCamelCase__ : List[Any] = drop_path_rate
lowerCamelCase__ : Dict = dropout_rate
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
                    json.dump(special_tokens_map, outfile )
                with open(os.path.join(tmp_dir, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
                    json.dump(tokenizer_config, outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
    def test_convert_tokens_to_string_format(self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string, str )
| 696 | 1 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
'''simple docstring'''
        token = '[PAD]'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ), token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ), token )
    def test_get_vocab(self ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '[PAD]' )
self.assertEqual(vocab_keys[1], '[CLS]' )
self.assertEqual(vocab_keys[-1], 'j' )
        self.assertEqual(len(vocab_keys ), 1_0_1_2 )
    def test_vocab_size(self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_0_1_2 )
    def test_full_tokenizer(self ):
        '''simple docstring'''
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ), [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]], )
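        # Note (added): fairseq_offset shifts raw SentencePiece ids to leave room for the special
        # tokens that fairseq places at the start of the vocabulary, hence the offset arithmetic above.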
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
], )
@cached_property
    def big_tokenizer(self ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def a__ (self ):
'''simple docstring'''
        text = 'Hello World!'
        expected_ids = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
        self.assertListEqual(expected_ids, self.big_tokenizer.encode(text ) )
@slow
    def test_tokenizer_integration(self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = {'input_ids': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_, model_name='microsoft/xprophetnet-large-wiki100-cased', revision='1acad1643ddd54a44df6a1b797ada8373685d90e', )
| 696 |
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length ):
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values' )
    return 6 * side_length**2
def surface_area_cuboid(length , breadth , height ):
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values' )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius ):
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values' )
    return 4 * pi * radius**2
def surface_area_hemisphere(radius ):
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
    return 3 * pi * radius**2
def surface_area_cone(radius , height ):
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values' )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_a , radius_b , height ):
    if radius_a < 0 or radius_b < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    # slant height follows from the Pythagorean relation between the height and the radius difference
    slant_height = (height**2 + (radius_a - radius_b) ** 2) ** 0.5
    return pi * ((slant_height * (radius_a + radius_b)) + radius_a**2 + radius_b**2)
def surface_area_cylinder(radius , height ):
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values' )
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius , tube_radius ):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values' )
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle(length , width ):
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values' )
    return length * width
def area_square(side_length ):
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values' )
    return side_length**2
def area_triangle(base , height ):
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values' )
    return (base * height) / 2
def area_triangle_three_sides(side_a , side_b , side_c ):
    if side_a < 0 or side_b < 0 or side_c < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side_a + side_b < side_c or side_b + side_c < side_a or side_a + side_c < side_b:
        raise ValueError('Given three sides do not form a triangle' )
    # Heron's formula
    semi_perimeter = (side_a + side_b + side_c) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_a)
        * (semi_perimeter - side_b)
        * (semi_perimeter - side_c) )
    return area
def area_parallelogram(base , height ):
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values' )
    return base * height
def area_trapezium(base_a , base_b , height ):
    if base_a < 0 or base_b < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base_a + base_b) * height
def area_circle(radius ):
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values' )
    return pi * radius**2
def area_ellipse(radius_x , radius_y ):
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values' )
    return pi * radius_x * radius_y
def area_rhombus(diagonal_a , diagonal_b ):
    if diagonal_a < 0 or diagonal_b < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_a * diagonal_b
def area_reg_polygon(sides , length ):
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
def bucket_sort(my_list ):
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets: list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
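# Worked example (illustrative): for [4, 5, 3, 2, 1], min=1 and max=5 give five buckets,
# element i lands in buckets[int(i - 1)], and concatenating the sorted buckets yields [1, 2, 3, 4, 5].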
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 696 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester :
    '''simple docstring'''
    def __init__(self, parent, batch_size=1_3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=9_9, hidden_size=6_4, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config(self ):
        '''simple docstring'''
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
    def prepare_config_and_inputs_for_decoder(self ):
        '''simple docstring'''
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask ):
        '''simple docstring'''
        model = GPTNeoXModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = GPTNeoXModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels ):
        '''simple docstring'''
        model = GPTNeoXForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask ):
        '''simple docstring'''
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1 )
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True )
        output_from_no_past = output_from_no_past['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask , token_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self ):
'''simple docstring'''
        self.model_tester = GPTNeoXModelTester(self )
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=6_4, num_attention_heads=8 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self ):
        '''simple docstring'''
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask )
    def test_model_as_decoder(self ):
        '''simple docstring'''
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask )
    def test_model_as_decoder_with_default_input_mask(self ):
        '''simple docstring'''
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask )
    def test_decoder_model_past_large_inputs(self ):
        '''simple docstring'''
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask )
    def test_model_for_causal_lm(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
    def test_model_for_question_answering(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_model_for_sequence_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_for_token_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@unittest.skip(reason='Feed forward chunking is not implemented' )
    def test_feed_forward_chunking(self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 1_0], config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = GPTNeoXModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_lm_generate_gptneox(self ):
'''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
            model.to(torch_device )
            inputs = tokenizer('My favorite food is', return_tensors='pt' ).to(torch_device )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=2_0 )
            output_str = tokenizer.batch_decode(output_ids )[0]
            self.assertEqual(output_str, expected_output )
| 696 | 1 |
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman :
    '''simple docstring'''
    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group = 1_4 ):
        '''simple docstring'''
        if group not in primes:
            raise ValueError('Unsupported Group' )
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(3_2 ) ), base=1_6 )
    def get_private_key(self ):
        '''simple docstring'''
        return hex(self.__private_key )[2:]
    def generate_public_key(self ):
        '''simple docstring'''
        public_key = pow(self.generator, self.__private_key, self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key(self, key ):
        '''simple docstring'''
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime ) == 1
        )
    def generate_shared_key(self, other_key_str ):
        '''simple docstring'''
        other_key = int(other_key_str, base=1_6 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('Invalid public key' )
        shared_key = pow(other_key, self.__private_key, self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str, prime ):
        '''simple docstring'''
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str, remote_public_key_str, group = 1_4 ):
        '''simple docstring'''
        local_private_key = int(local_private_key_str, base=1_6 )
        remote_public_key = int(remote_public_key_str, base=1_6 )
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime ):
            raise ValueError('Invalid public key' )
        shared_key = pow(remote_public_key, local_private_key, prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
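    # Illustrative round trip (added example, not part of the original module): two parties
    # exchange public keys and derive the same shared secret.
    alice , bob = DiffieHellman(), DiffieHellman()
    assert alice.generate_shared_key(bob.generate_public_key() ) == bob.generate_shared_key(alice.generate_public_key() )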
| 696 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
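# Illustrative match (added, assumed example): feeding the docstring fragment
# "[bert-base-uncased](https://huggingface.co/bert-base-uncased)" to _re_checkpoint.findall
# yields [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")].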
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name , ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint ) )
        raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 696 | 1 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
A_ : int = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
class TaTokenizerFast ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=1_0_0, additional_special_tokens=None, **kwargs, ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id_' in str(x ) ), additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens' )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.', FutureWarning, )
return max_model_length
    def save_vocabulary(self, save_directory, filename_prefix = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
            logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def get_sentinel_tokens(self ):
        '''simple docstring'''
        return list(
            set(filter(lambda x: bool(re.search(r'<extra_id_\d+>', x ) ) is not None, self.additional_special_tokens ) ) )
    def get_sentinel_token_ids(self ):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
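# Illustrative usage (assumed, not part of the original file):
#     tok = TaTokenizerFast.from_pretrained("t5-small")
#     tok.get_sentinel_tokens()     # e.g. ["<extra_id_12>", "<extra_id_3>", ...] (set-based, so unordered)
#     tok.get_sentinel_token_ids()  # the matching vocabulary ids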
| 696 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
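    # Note (added): _LazyModule defers the heavy imports declared in _import_structure until a
    # symbol such as `LlamaForCausalLM` is first accessed on the package module.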
| 696 | 1 |
"""simple docstring"""
def price_plus_tax(price , tax_rate ):
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(1_00, 0.25) = }")
print(f"{price_plus_tax(125.50, 0.05) = }")
| 696 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 1 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
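# Note (added for orientation): every test case below pins a small, deterministic slice of one
# UNet block's output; UNetBlockTesterMixin builds the dummy inputs and compares against that slice.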
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = DownBlockaD # noqa F405
lowerCamelCase__ : Tuple = 'down'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = ResnetDownsampleBlockaD # noqa F405
lowerCamelCase__ : int = 'down'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AttnDownBlockaD # noqa F405
lowerCamelCase__ : Optional[int] = 'down'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = CrossAttnDownBlockaD # noqa F405
lowerCamelCase__ : List[str] = 'down'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : Dict = 3_2
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = SimpleCrossAttnDownBlockaD # noqa F405
lowerCamelCase__ : Optional[int] = 'down'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[str] = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : List[str] = 3_2
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps', 'MPS result is not consistent' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = SkipDownBlockaD # noqa F405
lowerCamelCase__ : List[str] = 'down'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = AttnSkipDownBlockaD # noqa F405
lowerCamelCase__ : List[str] = 'down'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = DownEncoderBlockaD # noqa F405
lowerCamelCase__ : Optional[int] = 'down'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = {
'in_channels': 3_2,
'out_channels': 3_2,
}
lowerCamelCase__ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = AttnDownEncoderBlockaD # noqa F405
lowerCamelCase__ : Any = 'down'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = {
'in_channels': 3_2,
'out_channels': 3_2,
}
lowerCamelCase__ : int = self.dummy_input
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = UNetMidBlockaD # noqa F405
lowerCamelCase__ : str = 'mid'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = {
'in_channels': 3_2,
'temb_channels': 1_2_8,
}
lowerCamelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = UNetMidBlockaDCrossAttn # noqa F405
lowerCamelCase__ : List[str] = 'mid'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : Dict = 3_2
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCamelCase__ : Optional[Any] = 'mid'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : List[str] = 3_2
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = UpBlockaD # noqa F405
lowerCamelCase__ : Optional[Any] = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = ResnetUpsampleBlockaD # noqa F405
lowerCamelCase__ : str = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = CrossAttnUpBlockaD # noqa F405
lowerCamelCase__ : List[str] = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[str] = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : Tuple = 3_2
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = SimpleCrossAttnUpBlockaD # noqa F405
lowerCamelCase__ : Optional[Any] = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_, include_encoder_hidden_states=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : List[Any] = 3_2
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = AttnUpBlockaD # noqa F405
lowerCamelCase__ : Any = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
@unittest.skipIf(torch_device == 'mps', 'MPS result is not consistent' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = SkipUpBlockaD # noqa F405
lowerCamelCase__ : Union[str, Any] = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = AttnSkipUpBlockaD # noqa F405
lowerCamelCase__ : Any = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = UpDecoderBlockaD # noqa F405
lowerCamelCase__ : Dict = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = {'in_channels': 3_2, 'out_channels': 3_2}
lowerCamelCase__ : List[str] = self.dummy_input
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = AttnUpDecoderBlockaD # noqa F405
lowerCamelCase__ : Any = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = {'in_channels': 3_2, 'out_channels': 3_2}
lowerCamelCase__ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
super().test_output(lowerCamelCase_ )
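# Note (hedged): each test class above only overrides the init dict, the dummy
# inputs, and an expected output slice; the shared `test_output` helper from the
# base mixin is assumed to run the block and compare a fixed corner slice of its
# output against these values within a small tolerance.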
| 696 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
# The two results are visually very similar; however, the max element-wise diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is suspicious. Only a random slice is compared for now.
assert abs(slice_eff - slice ).max() < 1e-2
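# Hedged, self-contained sketch of the data-parallel pattern the tests above
# rely on (replicate params to every device, shard the batch, split the rng);
# the shapes are illustrative and no pipeline is involved.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((4,))}
p_params = replicate(params)  # adds a leading device axis to every leaf
rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
batch = jnp.zeros((jax.device_count() * 2, 8))
p_batch = shard(batch)  # reshaped to (num_devices, per_device_batch, 8)
print(p_batch.shape, rngs.shape)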
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
return str(_lowerCamelCase ) == str(_lowerCamelCase )[::-1]
def lowerCamelCase_ ( _lowerCamelCase ):
return int(_lowerCamelCase ) + int(str(_lowerCamelCase )[::-1] )
def lowerCamelCase_ ( _lowerCamelCase = 1_0000 ):
lowerCamelCase__ : Optional[Any] = []
for num in range(1 , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Dict = num
while iterations < 50:
lowerCamelCase__ : Optional[Any] = sum_reverse(_lowerCamelCase )
iterations += 1
if is_palindrome(_lowerCamelCase ):
break
else:
lychrel_nums.append(_lowerCamelCase )
return len(_lowerCamelCase )
if __name__ == "__main__":
print(f"{solution() = }")
| 696 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it's bigger than or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> x_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
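# Hedged usage sketch for the pipeline above. The class name is masked in this
# sample, so `pipe` stands for an instance of it, and the meaning of the masked
# positional arguments of __call__ (batch_size, num_inference_steps, generator,
# audio_length_in_s, return_dict) is an assumption inferred from the body:
#
#   pipe = <PipelineClass>(unet=unet, scheduler=scheduler).to("cuda")
#   output = pipe(1, 100, torch.Generator("cuda").manual_seed(0), 4.0)
#   waveform = output.audios[0]  # numpy array clamped to [-1, 1]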
| 696 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append the new tokens to input_ids and the new mask to attention_mask
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
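# The check above mirrors incremental decoding: a forward pass over only the
# appended tokens plus `past_key_values` must reproduce, within atol=1e-3, the
# hidden states obtained from a single full-sequence forward pass.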
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
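# For reference, the scaling dict built in the test above follows the
# Transformers `rope_scaling` convention, e.g. (values are illustrative):
#   config.rope_scaling = {"type": "linear", "factor": 10.0}
#   config.rope_scaling = {"type": "dynamic", "factor": 2.0}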
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameters should be reordered according to their respective places in the function signature:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with other args (for instance the parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly one arg (every arg before the one not provided, "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
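# Hedged usage sketch of the helpers exercised above. The argument order of
# `convert` is masked inside `_test_export`, so the order shown here (framework,
# model, output path, opset, tokenizer) is an assumption, and the model id is
# illustrative:
#
#   convert("pt", "bert-base-cased", Path("onnx/model.onnx"), 12, None)
#   quantized_path = quantize(Path("onnx/model.onnx"))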
| 696 | 1 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
A_ : Union[str, Any] = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
A_ : int = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = 'maskformer'
lowerCamelCase__ : Tuple = {'hidden_size': 'mask_feature_size'}
lowerCamelCase__ : List[Any] = ['resnet', 'swin']
lowerCamelCase__ : Union[str, Any] = ['detr']
def __init__(self, lowerCamelCase_ = 2_5_6, lowerCamelCase_ = 2_5_6, lowerCamelCase_ = 0.1, lowerCamelCase_ = False, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = 0.02, lowerCamelCase_ = 1.0, lowerCamelCase_ = 1.0, lowerCamelCase_ = 1.0, lowerCamelCase_ = 20.0, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowerCamelCase__ : Optional[int] = SwinConfig(
image_size=3_8_4, in_channels=3, patch_size=4, embed_dim=1_2_8, depths=[2, 2, 1_8, 2], num_heads=[4, 8, 1_6, 3_2], window_size=1_2, drop_path_rate=0.3, out_features=['stage1', 'stage2', 'stage3', 'stage4'], )
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : List[str] = backbone_config.pop('model_type' )
lowerCamelCase__ : Tuple = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ : int = config_class.from_dict(lowerCamelCase_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowerCamelCase__ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
lowerCamelCase__ : Union[str, Any] = (
decoder_config.pop('model_type' ) if isinstance(lowerCamelCase_, lowerCamelCase_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported )}''' )
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Tuple = CONFIG_MAPPING[decoder_type]
lowerCamelCase__ : int = config_class.from_dict(lowerCamelCase_ )
lowerCamelCase__ : int = backbone_config
lowerCamelCase__ : List[str] = decoder_config
# main feature dimension for the model
lowerCamelCase__ : Optional[Any] = fpn_feature_size
lowerCamelCase__ : int = mask_feature_size
# initializer
lowerCamelCase__ : Optional[Any] = init_std
lowerCamelCase__ : Union[str, Any] = init_xavier_std
# Hungarian matcher && loss
lowerCamelCase__ : Tuple = cross_entropy_weight
lowerCamelCase__ : Union[str, Any] = dice_weight
lowerCamelCase__ : str = mask_weight
lowerCamelCase__ : int = use_auxiliary_loss
lowerCamelCase__ : List[Any] = no_object_weight
lowerCamelCase__ : int = output_auxiliary_logits
lowerCamelCase__ : Any = self.decoder_config.encoder_attention_heads
lowerCamelCase__ : int = self.decoder_config.num_hidden_layers
super().__init__(**lowerCamelCase_ )
@classmethod
def a__ (cls, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return cls(
backbone_config=lowerCamelCase_, decoder_config=lowerCamelCase_, **lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = copy.deepcopy(self.__dict__ )
lowerCamelCase__ : str = self.backbone_config.to_dict()
lowerCamelCase__ : Optional[Any] = self.decoder_config.to_dict()
lowerCamelCase__ : Any = self.__class__.model_type
return output
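# Hedged usage sketch: the classmethod above (masked as `a__`) builds a config
# from explicit sub-configs; the constructor values are illustrative, not
# defaults taken from a released checkpoint, and the classmethod's original
# name is assumed:
#
#   backbone = SwinConfig(image_size=384, embed_dim=128)
#   decoder = DetrConfig()
#   config = <MaskFormerLikeConfig>.from_backbone_and_decoder_configs(backbone, decoder)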
| 696 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
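# The integration test above exercises the two-stage Kandinsky flow: the prior
# pipeline maps the prompt (and, for img2img, the init image) to CLIP image
# embeddings, which the controlnet img2img decoder pipeline then denoises into
# pixels under the depth hint.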
| 696 | 1 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = iter(_lowerCamelCase )
while True:
lowerCamelCase__ : str = tuple(itertools.islice(_lowerCamelCase , _lowerCamelCase ) )
if not chunk:
return
yield chunk
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
lowerCamelCase__ : Union[str, Any] = ''
if len(_lowerCamelCase ) < 2:
return dirty
for i in range(len(_lowerCamelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(clean ) & 1:
clean += "X"
return clean
def lowerCamelCase_ ( _lowerCamelCase ):
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
lowerCamelCase__ : str = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
lowerCamelCase__ : Tuple = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(_lowerCamelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(_lowerCamelCase )
return table
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : str = generate_table(_lowerCamelCase )
lowerCamelCase__ : Dict = prepare_input(_lowerCamelCase )
lowerCamelCase__ : str = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_lowerCamelCase , 2 ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = divmod(table.index(_lowerCamelCase ) , 5 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = divmod(table.index(_lowerCamelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = generate_table(_lowerCamelCase )
lowerCamelCase__ : Tuple = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_lowerCamelCase , 2 ):
lowerCamelCase__ , lowerCamelCase__ : int = divmod(table.index(_lowerCamelCase ) , 5 )
lowerCamelCase__ , lowerCamelCase__ : List[Any] = divmod(table.index(_lowerCamelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
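# Worked example (illustrative): with the key "MONARCHY" the generated 5x5
# table is
#
#   M O N A R
#   C H Y B D
#   E F G I K
#   L P Q S T
#   U V W X Z
#
# so the same-column digram "ME" encodes to "CL": each letter is replaced by
# the one directly below it, wrapping around the column.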
| 696 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
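# A minimal sketch of how a version table like this is typically consumed.
# `deps_lookup` is a hypothetical helper (not part of the table above) that
# resolves a package name to its pinned requirement string:
def deps_lookup(package: str) -> str:
    # fall back to the bare package name when no pin is recorded
    return A_.get(package, package)

# deps_lookup("torch") -> "torch>=1.9,!=1.12.0"
# deps_lookup("totally-unpinned") -> "totally-unpinned"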
| 696 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ : List[str] = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A_["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A_["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], A_, module_spec=__spec__)
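# Effect of the lazy-module pattern above (illustrative comments only): at
# import time only the import structure is registered, and each heavy
# submodule is loaded on first attribute access, e.g.
#
#   import transformers.models.rembert as rembert
#   cfg = rembert.RemBertConfig()   # first access triggers the real import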
| 696 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
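# Sanity check on the table above (illustrative; the dict is referenced as
# `primes` by the class below):
#
#   assert primes[14]["prime"].bit_length() == 2_0_4_8   # 2048-bit MODP group
#   assert primes[18]["prime"].bit_length() == 8_1_9_2   # 8192-bit MODP group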
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
if group not in primes:
raise ValueError('Unsupported Group' )
lowerCamelCase__ : int = primes[group]['prime']
lowerCamelCase__ : Optional[int] = primes[group]['generator']
lowerCamelCase__ : Any = int(hexlify(urandom(3_2 ) ), base=1_6 )
def a__ (self ):
'''simple docstring'''
return hex(self.__private_key )[2:]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = pow(self.generator, self.__private_key, self.prime )
return hex(lowerCamelCase_ )[2:]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime ) == 1
)
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
        lowerCamelCase__ : Any = int(lowerCamelCase_, base=1_6 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('Invalid public key' )
        lowerCamelCase__ : Tuple = pow(other_key, self.__private_key, self.prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime ) == 1
)
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
        lowerCamelCase__ : Dict = int(local_private_key_str, base=1_6 )
        lowerCamelCase__ : List[Any] = int(remote_public_key_str, base=1_6 )
        lowerCamelCase__ : List[str] = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime ):
            raise ValueError('Invalid public key' )
        lowerCamelCase__ : Dict = pow(remote_public_key, local_private_key, prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
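# Illustrative key exchange — a sketch assuming the class above is bound to
# its upstream name `DiffieHellman` with methods `generate_public_key` /
# `generate_shared_key` (here both the class and its methods carry mangled
# names, so this is shown as comments):
#
#   alice, bob = DiffieHellman(group=1_4), DiffieHellman(group=1_4)
#   shared_a = alice.generate_shared_key(bob.generate_public_key())
#   shared_b = bob.generate_shared_key(alice.generate_public_key())
#   assert shared_a == shared_b   # both sides derive the same SHA-256 digest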
| 696 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = tempfile.mkdtemp()
# fmt: off
lowerCamelCase__ : Tuple = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCamelCase__ : Dict = dict(zip(lowerCamelCase_, range(len(lowerCamelCase_ ) ) ) )
lowerCamelCase__ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCamelCase__ : str = {'unk_token': '<unk>'}
lowerCamelCase__ : Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase_ ) )
lowerCamelCase__ : Optional[Any] = {
'do_resize': True,
'size': 2_0,
'do_center_crop': True,
'crop_size': 1_8,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCamelCase__ : List[Any] = os.path.join(self.tmpdirname, lowerCamelCase_ )
with open(self.image_processor_file, 'w', encoding='utf-8' ) as fp:
json.dump(lowerCamelCase_, lowerCamelCase_ )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token='!', **lowerCamelCase_ )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token='!', **lowerCamelCase_ )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uinta )]
lowerCamelCase__ : Tuple = [Image.fromarray(np.moveaxis(lowerCamelCase_, 0, -1 ) ) for x in image_inputs]
return image_inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_rust_tokenizer()
lowerCamelCase__ : List[str] = self.get_image_processor()
lowerCamelCase__ : Optional[Any] = OwlViTProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase_ )
lowerCamelCase__ : int = OwlViTProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase__ : List[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCamelCase_ )
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[int] = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)' )
lowerCamelCase__ : Optional[int] = self.get_image_processor(do_normalize=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCamelCase_ )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Any = OwlViTProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = self.prepare_image_inputs()
lowerCamelCase__ : Dict = image_processor(lowerCamelCase_, return_tensors='np' )
lowerCamelCase__ : Dict = processor(images=lowerCamelCase_, return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : int = self.get_tokenizer()
lowerCamelCase__ : Any = OwlViTProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = 'lower newer'
lowerCamelCase__ : Tuple = processor(text=lowerCamelCase_, return_tensors='np' )
lowerCamelCase__ : List[str] = tokenizer(lowerCamelCase_, return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist() )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.get_image_processor()
lowerCamelCase__ : Optional[int] = self.get_tokenizer()
lowerCamelCase__ : str = OwlViTProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
lowerCamelCase__ : Dict = 'lower newer'
lowerCamelCase__ : Optional[int] = self.prepare_image_inputs()
lowerCamelCase__ : Dict = processor(text=lowerCamelCase_, images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ), ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = 'google/owlvit-base-patch32'
lowerCamelCase__ : int = OwlViTProcessor.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = ['cat', 'nasa badge']
lowerCamelCase__ : Optional[int] = processor(text=lowerCamelCase_ )
lowerCamelCase__ : str = 1_6
self.assertListEqual(list(inputs.keys() ), ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape, (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'google/owlvit-base-patch32'
lowerCamelCase__ : Dict = OwlViTProcessor.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = [['cat', 'nasa badge'], ['person']]
lowerCamelCase__ : Dict = processor(text=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = 1_6
lowerCamelCase__ : int = len(lowerCamelCase_ )
lowerCamelCase__ : Dict = max([len(lowerCamelCase_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ), ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape, (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'google/owlvit-base-patch32'
lowerCamelCase__ : List[str] = OwlViTProcessor.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = ['cat', 'nasa badge']
lowerCamelCase__ : Dict = processor(text=lowerCamelCase_ )
lowerCamelCase__ : int = 1_6
lowerCamelCase__ : Tuple = inputs['input_ids']
lowerCamelCase__ : Any = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ), ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape, (2, seq_length) )
self.assertListEqual(list(input_ids[0] ), predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ), predicted_ids[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.get_image_processor()
lowerCamelCase__ : Optional[int] = self.get_tokenizer()
lowerCamelCase__ : int = OwlViTProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.prepare_image_inputs()
lowerCamelCase__ : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase__ : List[Any] = processor(images=lowerCamelCase_, query_images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ), ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : Optional[Any] = OwlViTProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : List[Any] = processor.batch_decode(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
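# Worked example (comments only, since the function above carries a mangled
# name): kinetic energy KE = 0.5 * m * v**2 is symmetric in the sign of the
# velocity, so for m = 10 kg and v = ±10 m/s the function returns
#   0.5 * 10 * 10 * 10 = 500.0  # joules
# and any negative mass raises ValueError.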
| 696 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = KandinskyVaaInpaintPipeline
lowerCamelCase__ : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
lowerCamelCase__ : List[Any] = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
lowerCamelCase__ : Optional[int] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : int = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = {
'in_channels': 9,
            # out_channels is double in_channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : str = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.dummy_unet
lowerCamelCase__ : int = self.dummy_movq
lowerCamelCase__ : Any = DDIMScheduler(
num_train_timesteps=1_0_0_0, beta_schedule='linear', beta_start=0.00_085, beta_end=0.012, clip_sample=lowerCamelCase_, set_alpha_to_one=lowerCamelCase_, steps_offset=1, prediction_type='epsilon', thresholding=lowerCamelCase_, )
lowerCamelCase__ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : Tuple = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : str = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create mask
lowerCamelCase__ : Tuple = np.ones((6_4, 6_4), dtype=np.floataa )
lowerCamelCase__ : List[Any] = 0
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : List[Any] = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : int = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : List[str] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = 'cpu'
lowerCamelCase__ : int = self.get_dummy_components()
lowerCamelCase__ : Optional[int] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : Union[str, Any] = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
lowerCamelCase__ : str = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : int = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def a__ (self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
lowerCamelCase__ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = np.ones((7_6_8, 7_6_8), dtype=np.floataa )
lowerCamelCase__ : str = 0
lowerCamelCase__ : Any = 'a hat'
lowerCamelCase__ : List[str] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : int = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint', torch_dtype=torch.floataa )
lowerCamelCase__ : List[Any] = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : int = pipe_prior(
lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=5, negative_prompt='', ).to_tuple()
lowerCamelCase__ : List[str] = pipeline(
image=lowerCamelCase_, mask_image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=7_6_8, width=7_6_8, output_type='np', )
lowerCamelCase__ : Tuple = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
| 696 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 696 | 1 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = MODEL_FOR_MASKED_LM_MAPPING
lowerCamelCase__ : Dict = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ (self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', top_k=2, framework='tf' )
lowerCamelCase__ : Any = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=6 ), [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 3_8_0_1_5, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 2_5_5_0_6, 'token_str': ' accuser'},
], )
lowerCamelCase__ : Optional[int] = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=6 ), [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 3_8_0_1_5,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 2_5_5_0_6,
'token_str': ' accuser',
},
], )
lowerCamelCase__ : List[str] = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3 )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=6 ), [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2_9_4_1, 'token_str': ' Te'},
], )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', top_k=2, framework='pt' )
lowerCamelCase__ : List[Any] = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=6 ), [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 3_5_6_7_6, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS'},
], )
lowerCamelCase__ : Optional[Any] = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=6 ), [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS'},
], )
lowerCamelCase__ : int = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3 )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=6 ), [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2_9_4_1, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
], )
lowerCamelCase__ : Optional[Any] = unmasker('My name is <mask> <mask>', top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=6 ), [
[
{
'score': 2.2e-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 1_6_4_1_6, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
], )
@require_torch_gpu
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = pipeline('fill-mask', model='hf-internal-testing/tiny-random-distilbert', device=0, framework='pt' )
# convert model to fp16
pipe.model.half()
lowerCamelCase__ : Optional[int] = pipe('Paris is the [MASK] of France.' )
        # We don't actually care about the result; we just want to make sure
        # it works, i.e. that the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
@slow
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = pipeline(task='fill-mask', model='distilroberta-base', top_k=2, framework='pt' )
self.run_large_test(lowerCamelCase_ )
@slow
@require_tf
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = pipeline(task='fill-mask', model='distilroberta-base', top_k=2, framework='tf' )
self.run_large_test(lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
{'sequence': 'My name is John', 'score': 0.008, 'token': 6_1_0, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 1_5_7_3, 'token_str': ' Chris'},
], )
lowerCamelCase__ : str = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 2_2_0_1,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 1_2_7_9_0,
'token_str': ' Lyon',
},
], )
lowerCamelCase__ : Union[str, Any] = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 2_9_4_1, 'token_str': ' Te'},
], )
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', framework='pt' )
lowerCamelCase__ : Any = None
lowerCamelCase__ : str = None
self.run_pipeline_test(lowerCamelCase_, [] )
@require_tf
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', framework='tf' )
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Dict = None
self.run_pipeline_test(lowerCamelCase_, [] )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
lowerCamelCase__ : Dict = FillMaskPipeline(model=lowerCamelCase_, tokenizer=lowerCamelCase_ )
lowerCamelCase__ : Tuple = [
f'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = fill_masker.tokenizer
lowerCamelCase__ : int = fill_masker.model
lowerCamelCase__ : Union[str, Any] = fill_masker(
f'''This is a {tokenizer.mask_token}''', )
self.assertEqual(
lowerCamelCase_, [
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
], )
lowerCamelCase__ : Optional[Any] = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
lowerCamelCase_, [
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
], )
lowerCamelCase__ : Optional[Any] = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
lowerCamelCase_, [
[
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
],
[
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
],
], )
with self.assertRaises(lowerCamelCase_ ):
fill_masker([None] )
        # Inputs that contain no mask token are not supported
with self.assertRaises(lowerCamelCase_ ):
fill_masker('This is' )
self.run_test_top_k(lowerCamelCase_, lowerCamelCase_ )
self.run_test_targets(lowerCamelCase_, lowerCamelCase_ )
self.run_test_top_k_targets(lowerCamelCase_, lowerCamelCase_ )
self.fill_mask_with_duplicate_targets_and_top_k(lowerCamelCase_, lowerCamelCase_ )
self.fill_mask_with_multiple_masks(lowerCamelCase_, lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer.get_vocab()
lowerCamelCase__ : Dict = sorted(vocab.keys() )[:2]
# Pipeline argument
lowerCamelCase__ : Optional[int] = FillMaskPipeline(model=lowerCamelCase_, tokenizer=lowerCamelCase_, targets=lowerCamelCase_ )
lowerCamelCase__ : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
lowerCamelCase_, [
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
], )
lowerCamelCase__ : List[str] = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs}, lowerCamelCase_ )
lowerCamelCase__ : Tuple = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs}, set(lowerCamelCase_ ) )
# Call argument
lowerCamelCase__ : int = FillMaskPipeline(model=lowerCamelCase_, tokenizer=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_, [
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
{'sequence': ANY(lowerCamelCase_ ), 'score': ANY(lowerCamelCase_ ), 'token': ANY(lowerCamelCase_ ), 'token_str': ANY(lowerCamelCase_ )},
], )
lowerCamelCase__ : Tuple = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs}, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs}, set(lowerCamelCase_ ) )
# Score equivalence
lowerCamelCase__ : Optional[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [top_mask['token_str'] for top_mask in outputs]
lowerCamelCase__ : List[Any] = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCamelCase_ ) == set(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCamelCase_ )
lowerCamelCase__ : Any = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowerCamelCase_ ), nested_simplify(lowerCamelCase_ ) )
# Raises with invalid
with self.assertRaises(lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = fill_masker(f'''This is a {tokenizer.mask_token}''', targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = fill_masker(f'''This is a {tokenizer.mask_token}''', targets=[''] )
with self.assertRaises(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = fill_masker(f'''This is a {tokenizer.mask_token}''', targets='' )
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't return more
        # unique results than the number of unique targets
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
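    # --- Illustrative usage (not part of the test suite) ---
    # A minimal sketch of the behavior exercised above; the checkpoint name is
    # an assumption here, and the mask token depends on the tokenizer:
    #
    # from transformers import pipeline
    #
    # fill = pipeline("fill-mask", model="distilroberta-base")
    # fill("The capital of France is <mask>.", targets=[" Paris", " Lyon"], top_k=2)
    #
    # Passing `targets` restricts candidates to those tokens while keeping the
    # scores they would receive in an unrestricted run, and duplicate targets
    # are deduplicated -- exactly what the assertions above check.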
| 696 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                # zero out the guidance gradient for the final low-noise timesteps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create a batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
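    # --- Illustrative usage (not part of the pipeline class) ---
    # A minimal sketch of a control loop driving this pipeline, assuming a D4RL
    # environment; the checkpoint name follows the diffusers RL example and is
    # an assumption here:
    #
    # pipeline = ValueGuidedRLPipeline.from_pretrained(
    #     "bglick13/hopper-medium-v2-value-function-hor32", env=env
    # )
    # obs = env.reset()
    # for _ in range(1000):
    #     action = pipeline(obs, planning_horizon=32)  # plan, then take the first action
    #     obs, reward, done, info = env.step(action)
    #     if done:
    #         break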
| 696 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
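# --- Migration sketch (not part of this module) ---
# Since the shim above only forwards to DonutImageProcessor, new code can
# construct the processor directly; the checkpoint name is illustrative:
#
# from transformers import DonutImageProcessor
#
# image_processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")
# pixel_values = image_processor(image, return_tensors="pt").pixel_values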
| 696 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # total count over all single characters (the normalization constant).
    all_sum = sum(single_char_strings.values())

    # first-order (single-character) entropy
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # second-order (two-character) entropy
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each pair of alphas calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
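# Worked example (illustrative, not from the original file): for text = "aab ",
# analyze_text(text)[0] == Counter({'a': 2, ' ': 1, 'b': 1}), so the first-order
# entropy is -(0.5*log2(0.5) + 2 * 0.25*log2(0.25)) = 1.5 bits; calculate_prob
# prints the rounded first- and second-order entropies and their difference.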
def analyze_text(text: str) -> tuple[Counter, Counter]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 696 | 1 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
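# Expected output for the graph above (checked by hand, for illustration):
# removing vertex 2 separates {0, 1} from {3, 4, 5, 6, 7, 8}, removing 3
# isolates 4, and removing 5 cuts off the 6-7-8 cycle, so the program prints
# 2, 3 and 5, each on its own line.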
| 696 |
"""simple docstring"""
import os
def solution():
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 696 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 696 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
| 696 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
def __init__(self, lowerCamelCase_=8_0, lowerCamelCase_=1_6_0_0_0, lowerCamelCase_=0.0, lowerCamelCase_=1_0, lowerCamelCase_=2_5, lowerCamelCase_="hamming_window", lowerCamelCase_=32_768.0, lowerCamelCase_=0.97, lowerCamelCase_=1.0, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=False, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(feature_size=lowerCamelCase_, sampling_rate=lowerCamelCase_, padding_value=lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : List[str] = feature_size
lowerCamelCase__ : Union[str, Any] = sampling_rate
lowerCamelCase__ : List[str] = padding_value
lowerCamelCase__ : Optional[int] = hop_length
lowerCamelCase__ : List[Any] = win_length
lowerCamelCase__ : Tuple = frame_signal_scale
lowerCamelCase__ : Tuple = preemphasis_coeff
lowerCamelCase__ : Optional[Any] = mel_floor
lowerCamelCase__ : str = normalize_means
lowerCamelCase__ : Dict = normalize_vars
lowerCamelCase__ : List[str] = win_function
lowerCamelCase__ : Optional[Any] = return_attention_mask
lowerCamelCase__ : int = win_length * sampling_rate // 1_0_0_0
lowerCamelCase__ : Any = hop_length * sampling_rate // 1_0_0_0
lowerCamelCase__ : List[str] = optimal_fft_length(self.sample_size )
lowerCamelCase__ : str = (self.n_fft // 2) + 1
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if self.win_function == "hamming_window":
lowerCamelCase__ : str = window_function(window_length=self.sample_size, name=self.win_function, periodic=lowerCamelCase_ )
else:
lowerCamelCase__ : str = window_function(window_length=self.sample_size, name=self.win_function )
lowerCamelCase__ : List[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
lowerCamelCase__ : List[Any] = spectrogram(
one_waveform * self.frame_signal_scale, window=lowerCamelCase_, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=lowerCamelCase_, preemphasis=self.preemphasis_coeff, mel_filters=lowerCamelCase_, mel_floor=self.mel_floor, log_mel='log', )
return msfc_features.T
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self.normalize_means:
lowerCamelCase__ : Tuple = x[:input_length].mean(axis=0 )
lowerCamelCase__ : Optional[int] = np.subtract(lowerCamelCase_, lowerCamelCase_ )
if self.normalize_vars:
lowerCamelCase__ : Dict = x[:input_length].std(axis=0 )
lowerCamelCase__ : Dict = np.divide(lowerCamelCase_, lowerCamelCase_ )
if input_length < x.shape[0]:
lowerCamelCase__ : Optional[Any] = padding_value
# make sure array is in float32
lowerCamelCase__ : int = x.astype(np.floataa )
return x
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(lowerCamelCase_, lowerCamelCase_, self.padding_value ) for x, n in zip(lowerCamelCase_, lowerCamelCase_ )]
def __call__(self, lowerCamelCase_, lowerCamelCase_ = False, lowerCamelCase_ = None, lowerCamelCase_ = False, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCamelCase__ : List[Any] = isinstance(lowerCamelCase_, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCamelCase__ : int = is_batched_numpy or (
isinstance(lowerCamelCase_, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase__ : int = [np.asarray(lowerCamelCase_, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_, np.ndarray ):
lowerCamelCase__ : Tuple = np.asarray(lowerCamelCase_, dtype=np.floataa )
elif isinstance(lowerCamelCase_, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase__ : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase__ : str = [raw_speech]
# extract fbank features
lowerCamelCase__ : Dict = [self._extract_mfsc_features(lowerCamelCase_ ) for one_waveform in raw_speech]
# convert into correct format for padding
lowerCamelCase__ : List[str] = BatchFeature({'input_features': features} )
lowerCamelCase__ : Any = self.pad(
lowerCamelCase_, padding=lowerCamelCase_, max_length=lowerCamelCase_, truncation=lowerCamelCase_, pad_to_multiple_of=lowerCamelCase_, return_attention_mask=lowerCamelCase_, **lowerCamelCase_, )
# make sure list is in array format
lowerCamelCase__ : int = padded_inputs.get('input_features' )
if isinstance(input_features[0], lowerCamelCase_ ):
lowerCamelCase__ : Dict = [np.asarray(lowerCamelCase_, dtype=np.floataa ) for feature in input_features]
lowerCamelCase__ : Any = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowerCamelCase__ : int = [np.asarray(lowerCamelCase_, dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowerCamelCase__ : int = (
np.array(lowerCamelCase_, dtype=np.intaa )
if self._get_padding_strategies(lowerCamelCase_, max_length=lowerCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowerCamelCase__ : Tuple = self.normalize(
padded_inputs['input_features'], attention_mask=lowerCamelCase_ )
if return_tensors is not None:
lowerCamelCase__ : str = padded_inputs.convert_to_tensors(lowerCamelCase_ )
return padded_inputs
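# --- Illustrative sketch (not part of this extractor) ---
# The per-utterance mean/variance normalization implemented above is classic
# CMVN; a minimal numpy equivalent, assuming `feats` has shape
# (num_frames, num_mel_bins) and with the epsilon guard being our own addition:
#
# import numpy as np
#
# def cmvn(feats):
#     mean = feats.mean(axis=0)  # per-dimension mean over frames
#     std = feats.std(axis=0)    # per-dimension standard deviation
#     return (feats - mean) / np.maximum(std, 1e-10)  # avoid division by zero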
| 696 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
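    # --- Illustrative helper (not part of the test above) ---
    # The ratio-based bound generalizes to a reusable assertion; the helper
    # name and default tolerance are our own:
    #
    # def assert_relative_close(expected, actual, tolerance=1e-3):
    #     ratio = expected / actual
    #     assert bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance)), (
    #         f"relative deviation exceeds {tolerance}"
    #     )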
| 696 | 1 |
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 696 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def remove_last_block(string):
    """Remove the last block of the generated code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per task and regroup them by task id."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
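# Illustrative behavior of `remove_last_block` (the example string is ours):
# remove_last_block("    return x + 1\nprint(check(candidate))") splits on the
# EOF markers into ["    return x + 1", "\nprint", "(check(candidate))"] and
# keeps everything before the last marker, i.e. "    return x + 1".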
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 1 |
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 696 | 1 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
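    # --- Illustrative usage (not part of the test suite) ---
    # A minimal sketch of the offload API exercised above; the folder name is
    # our own choice:
    #
    # model = ModelForTest()
    # offload_state_dict("offload_dir", model.state_dict())        # spill weights to .dat files + index.json
    # weights = OffloadedWeightsLoader(save_folder="offload_dir")  # lazy, dict-like access
    # w = weights["linear1.weight"]                                # loaded from disk on demand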
| 696 |
"""simple docstring"""
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    f = factorial(num)
    result = split_and_add(f)
    return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # out_channels is double in_channels because the model predicts both mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
| 696 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
A_ : List[Any] = logging.get_logger(__name__)
A_ : Optional[int] = "T5Config"
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = jnp.zeros_like(_lowerCamelCase )
lowerCamelCase__ : Tuple = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase__ : List[str] = shifted_input_ids.at[:, 0].set(_lowerCamelCase )
lowerCamelCase__ : int = jnp.where(shifted_input_ids == -100 , _lowerCamelCase , _lowerCamelCase )
return shifted_input_ids
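# The helper above mirrors the standard T5-style `shift_tokens_right`: it prepends
# the decoder start token, drops the last input token, and maps the -100 label
# padding back to `pad_token_id` so labels can be reused as decoder inputs.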
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = 'mt5'
lowerCamelCase__ : Optional[Any] = MTaConfig
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'mt5'
lowerCamelCase__ : List[Any] = MTaConfig
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'mt5'
lowerCamelCase__ : List[Any] = MTaConfig
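# The three subclasses above reuse the Flax T5 model/encoder/generation code
# verbatim; they only re-point the model type and config class at mT5.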
| 696 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
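    # Surface area of a (ring) torus: 4 * pi**2 * R * r, where R is the distance from
    # the centre of the tube to the centre of the torus and r is the tube radius.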
return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
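    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)),
    # where s is the semi-perimeter (a + b + c) / 2.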
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
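    # Area of a regular n-gon with side length s: (n * s**2) / (4 * tan(pi / n)).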
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 1 |
"""simple docstring"""
from math import isqrt
def lowerCamelCase_ ( _lowerCamelCase ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(_lowerCamelCase ) + 1 ) )
def lowerCamelCase_ ( _lowerCamelCase = 10**6 ):
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : int = 1
lowerCamelCase__ : Tuple = 7
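    # Candidates are differences of consecutive cubes: (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1.
    # The first one is 7 (n = 1) and each subsequent difference grows by 6*n,
    # which is exactly the `6 * cube_index` increment below.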
while prime_candidate < max_prime:
primes_count += is_prime(_lowerCamelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(f"{solution() = }")
| 696 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
A_ : Union[str, Any] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
A_ : List[str] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
A_ : Union[str, Any] = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ), reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
], )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(lowerCamelCase_, lowerCamelCase_, sample_weight=lowerCamelCase_ ) ),
}
| 696 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 696 | 1 |
"""simple docstring"""
A_ : Dict = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Tuple = set()
# keep track of all the paths to be checked
lowerCamelCase__ : Optional[Any] = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
lowerCamelCase__ : Any = queue.pop(0 )
# get the last node from the path
lowerCamelCase__ : List[str] = path[-1]
if node not in explored:
lowerCamelCase__ : Union[str, Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
lowerCamelCase__ : Union[str, Any] = list(_lowerCamelCase )
new_path.append(_lowerCamelCase )
queue.append(_lowerCamelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(_lowerCamelCase )
# in case there's no path between the 2 nodes
return []
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
lowerCamelCase__ : Any = [start]
lowerCamelCase__ : List[Any] = set(_lowerCamelCase )
# Keep tab on distances from `start` node.
lowerCamelCase__ : Tuple = {start: 0, target: -1}
while queue:
lowerCamelCase__ : Optional[int] = queue.pop(0 )
if node == target:
lowerCamelCase__ : Optional[int] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(_lowerCamelCase )
queue.append(_lowerCamelCase )
lowerCamelCase__ : int = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 696 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
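# Each optional backend below is probed with try/except so that importing the
# package still succeeds when sentencepiece, tokenizers, or torch is missing;
# the corresponding symbols are simply left out of the lazy import structure.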
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 696 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
A_ : int = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowerCamelCase__ : str = 128
elif "12-12" in model_name:
lowerCamelCase__ : Union[str, Any] = 12
lowerCamelCase__ : str = 12
elif "14-14" in model_name:
lowerCamelCase__ : Union[str, Any] = 14
lowerCamelCase__ : str = 14
elif "16-16" in model_name:
lowerCamelCase__ : Optional[Any] = 16
lowerCamelCase__ : int = 16
else:
raise ValueError('Model not supported' )
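    # The "X-Y" fragment of the checkpoint name encodes the frequency/time strides of
    # the patch embedding; "10-10" matches the ASTConfig defaults, so nothing is set.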
lowerCamelCase__ : List[str] = 'huggingface/label-files'
if "speech-commands" in model_name:
lowerCamelCase__ : Optional[Any] = 35
lowerCamelCase__ : Any = 'speech-commands-v2-id2label.json'
else:
lowerCamelCase__ : Optional[int] = 527
lowerCamelCase__ : Any = 'audioset-id2label.json'
lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
    lowerCamelCase__ : List[str] = {int(k): v for k, v in idalabel.items()}
lowerCamelCase__ : str = idalabel
lowerCamelCase__ : str = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( _lowerCamelCase ):
if "module.v" in name:
lowerCamelCase__ : str = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
lowerCamelCase__ : Optional[Any] = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
lowerCamelCase__ : int = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
lowerCamelCase__ : Union[str, Any] = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowerCamelCase__ : str = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
lowerCamelCase__ : str = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowerCamelCase__ : Dict = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCamelCase__ : List[str] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCamelCase__ : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCamelCase__ : str = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCamelCase__ : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCamelCase__ : Any = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowerCamelCase__ : Dict = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
lowerCamelCase__ : Union[str, Any] = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
lowerCamelCase__ : Any = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : Any = orig_state_dict.pop(_lowerCamelCase )
if "qkv" in key:
lowerCamelCase__ : int = key.split('.' )
lowerCamelCase__ : Any = int(key_split[3] )
lowerCamelCase__ : Optional[int] = config.hidden_size
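            # The source checkpoint stores query/key/value as a single fused `qkv` tensor;
            # split it into three equal `hidden_size`-sized chunks for the HF attention layout.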
if "weight" in key:
lowerCamelCase__ : Dict = val[:dim, :]
lowerCamelCase__ : Union[str, Any] = val[dim : dim * 2, :]
lowerCamelCase__ : str = val[-dim:, :]
else:
lowerCamelCase__ : Tuple = val[:dim]
lowerCamelCase__ : str = val[dim : dim * 2]
lowerCamelCase__ : Optional[int] = val[-dim:]
else:
lowerCamelCase__ : Union[str, Any] = val
return orig_state_dict
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : int = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
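    # These are the unused ViT/DeiT classification heads of the original backbone;
    # the HF model has no counterpart for them, so they are dropped before loading.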
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
lowerCamelCase__ : Optional[int] = get_audio_spectrogram_transformer_config(_lowerCamelCase )
lowerCamelCase__ : str = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
lowerCamelCase__ : List[Any] = model_name_to_url[model_name]
lowerCamelCase__ : str = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location='cpu' )
# remove some keys
remove_keys(_lowerCamelCase )
# rename some keys
lowerCamelCase__ : List[str] = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
# load 🤗 model
lowerCamelCase__ : List[Any] = ASTForAudioClassification(_lowerCamelCase )
model.eval()
model.load_state_dict(_lowerCamelCase )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowerCamelCase__ : Dict = -4.2_677_393 if 'speech-commands' not in model_name else -6.845_978
lowerCamelCase__ : Optional[Any] = 4.5_689_974 if 'speech-commands' not in model_name else 5.5_654_526
lowerCamelCase__ : Optional[int] = 1024 if 'speech-commands' not in model_name else 128
lowerCamelCase__ : Tuple = ASTFeatureExtractor(mean=_lowerCamelCase , std=_lowerCamelCase , max_length=_lowerCamelCase )
if "speech-commands" in model_name:
lowerCamelCase__ : Dict = load_dataset('speech_commands' , 'v0.02' , split='validation' )
lowerCamelCase__ : str = dataset[0]['audio']['array']
else:
lowerCamelCase__ : List[Any] = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
lowerCamelCase__ , lowerCamelCase__ : Dict = torchaudio.load(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = waveform.squeeze().numpy()
lowerCamelCase__ : str = feature_extractor(_lowerCamelCase , sampling_rate=1_6000 , return_tensors='pt' )
# forward pass
lowerCamelCase__ : List[Any] = model(**_lowerCamelCase )
lowerCamelCase__ : Dict = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowerCamelCase__ : int = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowerCamelCase__ : Union[str, Any] = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowerCamelCase__ : Optional[Any] = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowerCamelCase__ : Dict = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowerCamelCase__ : Optional[int] = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowerCamelCase__ : Tuple = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowerCamelCase__ : Optional[int] = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowerCamelCase__ : str = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1e-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A_ : Optional[int] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
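# --- Added illustration (not part of the conversion script) ---
# Hedged sketch of exercising the converted checkpoint end to end. The hub id
# matches the default --model_name pushed above; the silent waveform is a dummy input.
def _demo_ast_inference():
    import numpy as np
    from transformers import ASTFeatureExtractor, ASTForAudioClassification
    name = "MIT/ast-finetuned-audioset-10-10-0.4593"
    extractor = ASTFeatureExtractor.from_pretrained(name)
    model = ASTForAudioClassification.from_pretrained(name)
    waveform = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
    inputs = extractor(waveform, sampling_rate=16000, return_tensors="pt")
    logits = model(**inputs).logits  # shape (1, num_labels)
    return logits.argmax(-1).item()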
| 696 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 1 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
A_ : Optional[Any] = ["text", "image", "audio"]
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
inputs.append(create_inputs(_lowerCamelCase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : int = []
for output in outputs:
if isinstance(_lowerCamelCase , (str, AgentText) ):
output_types.append('text' )
elif isinstance(_lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(_lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a_ :
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool, 'inputs' ) )
self.assertTrue(hasattr(self.tool, 'outputs' ) )
lowerCamelCase__ : Optional[int] = self.tool.inputs
for _input in inputs:
if isinstance(_input, lowerCamelCase_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCamelCase__ : Union[str, Any] = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = create_inputs(self.tool.inputs )
lowerCamelCase__ : List[Any] = self.tool(*lowerCamelCase_ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCamelCase__ : List[Any] = [outputs]
self.assertListEqual(output_types(lowerCamelCase_ ), self.tool.outputs )
def a__ (self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool, 'description' ) )
self.assertTrue(hasattr(self.tool, 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = create_inputs(self.tool.inputs )
lowerCamelCase__ : Optional[int] = self.tool(*lowerCamelCase_ )
if not isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = [outputs]
self.assertEqual(len(lowerCamelCase_ ), len(self.tool.outputs ) )
for output, output_type in zip(lowerCamelCase_, self.tool.outputs ):
lowerCamelCase__ : List[Any] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = create_inputs(self.tool.inputs )
lowerCamelCase__ : Optional[int] = []
for _input, input_type in zip(lowerCamelCase_, self.tool.inputs ):
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCamelCase__ : int = self.tool(*lowerCamelCase_ )
if not isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : str = [outputs]
self.assertEqual(len(lowerCamelCase_ ), len(self.tool.outputs ) )
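# Added, self-contained sketch of the isinstance dispatch these type helpers rely
# on (torch and PIL assumed available, as in the availability guards above):
def _classify_output(value):
    import torch
    from PIL import Image
    if isinstance(value, str):
        return "text"
    if isinstance(value, Image.Image):
        return "image"
    if isinstance(value, torch.Tensor):
        return "audio"
    raise ValueError(f"Invalid output: {value}")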
| 696 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
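# Added, hedged sketch of the replicate/shard plumbing each test above repeats:
# params are replicated per device, the global batch gains a leading device axis,
# and every device receives its own PRNG key.
def _shard_demo():
    import jax
    import jax.numpy as jnp
    n = jax.device_count()
    rngs = jax.random.split(jax.random.PRNGKey(0), n)   # one key per device
    batch = jnp.ones((n * 2, 4))                        # global batch
    sharded = batch.reshape((n, -1) + batch.shape[1:])  # what flax's shard() does
    return rngs.shape, sharded.shape                    # (n, 2), (n, 2, 4)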
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = False ):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
lowerCamelCase__ : Dict = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
lowerCamelCase__ : str = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
lowerCamelCase__ : Any = primes[:idx]
break
lowerCamelCase__ , lowerCamelCase__ : int = n - 1, 0
# break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCamelCase__ : Optional[Any] = False
for r in range(_lowerCamelCase ):
lowerCamelCase__ : Any = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCamelCase__ : Dict = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the loop above never evaluated to true,
# and n must be composite
return False
return True
def lowerCamelCase_ ( ):
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
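# Added, self-contained sketch of a single Miller-Rabin round as run above:
# write n - 1 = d * 2**s with d odd, then probe one witness a.
def _mr_round(n: int, a: int) -> bool:
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    x = pow(a, d, n)
    if x in (1, n - 1):
        return True  # a does not witness compositeness
    for _ in range(s - 1):
        x = pow(x, 2, n)
        if x == n - 1:
            return True
    return False  # a proves n composite
assert _mr_round(97, 2)       # 97 is prime; 2 is not a witness
assert not _mr_round(561, 2)  # 2 exposes the Carmichael number 561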
| 696 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous audio sample: x_t -> x_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
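# Added, hedged usage sketch: how this audio pipeline is typically driven.
# "harmonai/maestro-150k" is a public DanceDiffusion checkpoint, but the exact id
# is an assumption here and nothing below runs at import time.
def _demo_audio_pipeline():
    from diffusers import DanceDiffusionPipeline
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    output = pipe(batch_size=1, num_inference_steps=50, audio_length_in_s=4.0)
    return output.audios[0]  # numpy array, shape (channels, samples)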
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase = 100_0000 ):
lowerCamelCase__ : Tuple = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _lowerCamelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
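# Added, self-contained variant of the sieve above on a tiny limit, phrased with
# the more common phi[i] = i initialisation (equivalent bookkeeping):
def _totient_sieve(limit: int) -> list:
    phi = list(range(limit + 1))  # phi[i] starts at i
    for p in range(2, limit + 1):
        if phi[p] == p:  # p untouched so far => p is prime
            for m in range(p, limit + 1, p):
                phi[m] -= phi[m] // p
    return phi
assert sum(_totient_sieve(8)[2:]) == 21  # reduced fractions with denominator <= 8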
| 696 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly one arg left (everything before the omitted "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
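# Added, self-contained sketch of the suffixing behaviour the last test checks.
# This mirrors what generate_identified_filename is expected to do; treat it as an
# illustration rather than the library source:
from pathlib import PurePosixPath
def _identified(model: PurePosixPath, suffix: str) -> PurePosixPath:
    return model.parent.joinpath(model.stem + suffix).with_suffix(model.suffix)
assert _identified(PurePosixPath("/tmp/model.onnx"), "-test").as_posix() == "/tmp/model-test.onnx"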
| 696 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : List[str] = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 'switch_transformers'
lowerCamelCase__ : Any = ['past_key_values']
lowerCamelCase__ : Tuple = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, lowerCamelCase_=3_2_1_2_8, lowerCamelCase_=7_6_8, lowerCamelCase_=6_4, lowerCamelCase_=2_0_4_8, lowerCamelCase_=6_4, lowerCamelCase_=1_2, lowerCamelCase_=3, lowerCamelCase_=1_2, lowerCamelCase_=3, lowerCamelCase_=1_2, lowerCamelCase_=8, lowerCamelCase_=False, lowerCamelCase_=0.01, lowerCamelCase_="float32", lowerCamelCase_=False, lowerCamelCase_=3_2, lowerCamelCase_=1_2_8, lowerCamelCase_=0.1, lowerCamelCase_=1e-6, lowerCamelCase_=0.001, lowerCamelCase_=0.001, lowerCamelCase_=1.0, lowerCamelCase_="relu", lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=True, lowerCamelCase_=0, lowerCamelCase_=1, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = vocab_size
lowerCamelCase__ : str = d_model
lowerCamelCase__ : str = d_kv
lowerCamelCase__ : Optional[int] = d_ff
lowerCamelCase__ : Optional[int] = num_sparse_encoder_layers
lowerCamelCase__ : Any = num_layers
lowerCamelCase__ : Any = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCamelCase__ : Optional[int] = num_sparse_decoder_layers
# This tells us how often (every how many layers) an encoder layer is made sparse.
if self.num_sparse_encoder_layers > 0:
lowerCamelCase__ : str = self.num_layers // self.num_sparse_encoder_layers
else:
lowerCamelCase__ : Optional[Any] = self.num_layers # HACK: this will create 0 sparse layers
# This tells us how often (every how many layers) a decoder layer is made sparse.
if self.num_sparse_decoder_layers > 0:
lowerCamelCase__ : List[Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowerCamelCase__ : Union[str, Any] = self.num_decoder_layers # HACK: this will create 0 sparse layers
lowerCamelCase__ : List[str] = num_heads
lowerCamelCase__ : Optional[Any] = num_experts
lowerCamelCase__ : Any = expert_capacity
lowerCamelCase__ : Union[str, Any] = router_bias
lowerCamelCase__ : str = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
lowerCamelCase__ : str = router_dtype
lowerCamelCase__ : Optional[int] = router_ignore_padding_tokens
lowerCamelCase__ : Optional[int] = relative_attention_num_buckets
lowerCamelCase__ : Optional[int] = relative_attention_max_distance
lowerCamelCase__ : int = dropout_rate
lowerCamelCase__ : List[Any] = layer_norm_epsilon
lowerCamelCase__ : int = initializer_factor
lowerCamelCase__ : List[Any] = feed_forward_proj
lowerCamelCase__ : Dict = use_cache
lowerCamelCase__ : Any = add_router_probs
lowerCamelCase__ : str = router_z_loss_coef
lowerCamelCase__ : Optional[Any] = router_aux_loss_coef
lowerCamelCase__ : Optional[Any] = self.feed_forward_proj.split('-' )
lowerCamelCase__ : Any = act_info[-1]
lowerCamelCase__ : Any = act_info[0] == 'gated'
if len(lowerCamelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase_ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCamelCase__ : str = 'gelu_new'
super().__init__(
pad_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, is_encoder_decoder=lowerCamelCase_, **lowerCamelCase_, )
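# Added, hedged illustration: with the defaults above (num_layers=12,
# num_sparse_encoder_layers=3), every 4th encoder layer is sparse upstream.
def _demo_sparse_step():
    from transformers import SwitchTransformersConfig
    cfg = SwitchTransformersConfig()
    return cfg.num_layers // cfg.num_sparse_encoder_layers  # 4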
| 696 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# out_channels is double in_channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
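# Added, self-contained sketch of the hint preprocessing performed above:
# an HxWxC uint8 image becomes a 1xCxHxW float tensor scaled to [0, 1].
import numpy as np
import torch
def _to_hint(image):
    hint = torch.from_numpy(np.asarray(image)).float() / 255.0
    return hint.permute(2, 0, 1).unsqueeze(0)
assert _to_hint(np.zeros((64, 64, 3), dtype=np.uint8)).shape == (1, 3, 64, 64)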
| 696 | 1 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = StableDiffusionControlNetImgaImgPipeline
lowerCamelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCamelCase__ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
lowerCamelCase__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : str = UNetaDConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=3_2, )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = ControlNetModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, in_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), cross_attention_dim=3_2, conditioning_embedding_out_channels=(1_6, 3_2), )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=lowerCamelCase_, set_alpha_to_one=lowerCamelCase_, )
torch.manual_seed(0 )
lowerCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, )
lowerCamelCase__ : Optional[Any] = CLIPTextModel(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ : Optional[int] = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : List[Any] = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = 2
lowerCamelCase__ : Optional[Any] = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor), generator=lowerCamelCase_, device=torch.device(lowerCamelCase_ ), )
lowerCamelCase__ : List[str] = floats_tensor(control_image.shape, rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : List[str] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((6_4, 6_4) )
lowerCamelCase__ : Optional[int] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def a__ (self ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def a__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def a__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = StableDiffusionControlNetImgaImgPipeline
lowerCamelCase__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCamelCase__ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase__ : Tuple = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Dict = UNetaDConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=3_2, )
torch.manual_seed(0 )
def init_weights(lowerCamelCase_ ):
if isinstance(lowerCamelCase_, torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
lowerCamelCase__ : List[str] = ControlNetModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, in_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), cross_attention_dim=3_2, conditioning_embedding_out_channels=(1_6, 3_2), )
controlneta.controlnet_down_blocks.apply(lowerCamelCase_ )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = ControlNetModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, in_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), cross_attention_dim=3_2, conditioning_embedding_out_channels=(1_6, 3_2), )
controlneta.controlnet_down_blocks.apply(lowerCamelCase_ )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=lowerCamelCase_, set_alpha_to_one=lowerCamelCase_, )
torch.manual_seed(0 )
lowerCamelCase__ : Any = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, )
lowerCamelCase__ : Tuple = CLIPTextModel(lowerCamelCase_ )
lowerCamelCase__ : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ : Tuple = MultiControlNetModel([controlneta, controlneta] )
lowerCamelCase__ : Union[str, Any] = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : Dict = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = 2
lowerCamelCase__ : Dict = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor), generator=lowerCamelCase_, device=torch.device(lowerCamelCase_ ), ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor), generator=lowerCamelCase_, device=torch.device(lowerCamelCase_ ), ),
]
lowerCamelCase__ : Any = floats_tensor(control_image[0].shape, rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : List[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((6_4, 6_4) )
lowerCamelCase__ : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_dummy_components()
lowerCamelCase__ : List[str] = self.pipeline_class(**lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 10.0
lowerCamelCase__ : Union[str, Any] = 4
lowerCamelCase__ : Any = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = steps
lowerCamelCase__ : int = scale
lowerCamelCase__ : Any = pipe(**lowerCamelCase_ )[0]
lowerCamelCase__ : int = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = steps
lowerCamelCase__ : Union[str, Any] = scale
lowerCamelCase__ : Union[str, Any] = pipe(**lowerCamelCase_, control_guidance_start=0.1, control_guidance_end=0.2 )[0]
lowerCamelCase__ : Tuple = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : int = steps
lowerCamelCase__ : Any = scale
lowerCamelCase__ : Union[str, Any] = pipe(**lowerCamelCase_, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7] )[0]
lowerCamelCase__ : Tuple = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Dict = steps
lowerCamelCase__ : List[str] = scale
lowerCamelCase__ : Any = pipe(**lowerCamelCase_, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def a__ (self ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
def a__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def a__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.get_dummy_components()
lowerCamelCase__ : Union[str, Any] = self.pipeline_class(**lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(lowerCamelCase_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
lowerCamelCase__ : str = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', safety_checker=lowerCamelCase_, controlnet=lowerCamelCase_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ : List[str] = 'evil space-punk bird'
lowerCamelCase__ : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_1_2, 5_1_2) )
lowerCamelCase__ : Optional[int] = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_1_2, 5_1_2) )
lowerCamelCase__ : Union[str, Any] = pipe(
lowerCamelCase_, lowerCamelCase_, control_image=lowerCamelCase_, generator=lowerCamelCase_, output_type='np', num_inference_steps=5_0, strength=0.6, )
lowerCamelCase__ : int = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
lowerCamelCase__ : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9e-2
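# Added, hedged sketch of the control_guidance_start/end windows varied above:
# conceptually, a controlnet contributes only while the denoising progress lies in
# its [start, end) window (the library's actual bookkeeping differs in detail).
def _controlnet_scales(progress, starts, ends, base_scale=1.0):
    return [base_scale if s <= progress < e else 0.0 for s, e in zip(starts, ends)]
assert _controlnet_scales(0.15, [0.1, 0.3], [0.2, 0.7]) == [1.0, 0.0]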
| 696 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
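# Added illustration: a table like this is typically consumed by turning bare
# package names into their pinned specifiers, e.g. when assembling setup extras.
def _pins(names):
    return [A_[n] for n in names]
assert _pins(["torch", "numpy"]) == ["torch>=1.9,!=1.12.0", "numpy>=1.17"]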
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth, node_index, is_max, scores, height):
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if not scores:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    # alternate between maximizing and minimizing players on each level
    return (
        max(
            minimax(depth + 1, node_index * 2, not is_max, scores, height), minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, not is_max, scores, height), minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height), )
    )
def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores), 2)
    print(f'''Optimal value : {minimax(0, 0, True, scores, height)}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
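# A worked check (not from the original file): with the scores above the
# depth-2 maxima are 90, 33, 65, 34423; the depth-1 minima are 33 and 65;
# a max-playing root therefore obtains 65.
assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 3_4423], math.log(8, 2)) == 65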
| 696 |
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256 as shaaaa  # aliased so the call sites below keep working
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    '''simple docstring'''

    def __init__(self, group = 1_4 ):
        '''simple docstring'''
        if group not in primes:
            raise ValueError('Unsupported Group' )
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(3_2 ) ), base=1_6 )

    def get_private_key(self ):
        '''simple docstring'''
        return hex(self.__private_key )[2:]

    def generate_public_key(self ):
        '''simple docstring'''
        public_key = pow(self.generator, self.__private_key, self.prime )
        return hex(public_key )[2:]

    def is_valid_public_key(self, key ):
        '''simple docstring'''
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime ) == 1
        )

    def generate_shared_key(self, other_key_str ):
        '''simple docstring'''
        other_key = int(other_key_str, base=1_6 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('Invalid public key' )
        shared_key = pow(other_key, self.__private_key, self.prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str, prime ):
        '''simple docstring'''
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime ) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str, remote_public_key_str, group = 1_4 ):
        '''simple docstring'''
        local_private_key = int(local_private_key_str, base=1_6 )
        remote_public_key = int(remote_public_key_str, base=1_6 )
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime ):
            raise ValueError('Invalid public key' )
        shared_key = pow(remote_public_key, local_private_key, prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
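# A usage sketch (not from the original file; method names follow the repaired
# class above). Both parties derive the same shared secret.
def _demo_key_exchange():
    alice = DiffieHellman()
    bob = DiffieHellman()
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b

    # The same math on a toy group (prime 23, generator 5), for inspection:
    p, g = 23, 5
    a, b = 6, 15  # private exponents
    pub_a, pub_b = pow(g, a, p), pow(g, b, p)  # public keys: 8 and 19
    assert pow(pub_b, a, p) == pow(pub_a, b, p) == 2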
| 696 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
A_ : Dict = {f"funnel-transformer/{name}": 5_12 for name in _model_names}
A_ : str = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase', do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
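# A minimal sketch (not from the original file) of the token_type_ids scheme
# above: the [CLS] slot gets type 2 (cls_token_type_id), segment A gets 0,
# segment B gets 1, and each segment includes its trailing [SEP].
def _funnel_type_ids(len_a, len_b=None, cls_token_type_id=2):
    ids = [cls_token_type_id] + [0] * (len_a + 1)
    if len_b is not None:
        ids += [1] * (len_b + 1)
    return ids

assert _funnel_type_ids(3) == [2, 0, 0, 0, 0]
assert _funnel_type_ids(2, 2) == [2, 0, 0, 0, 1, 1, 1]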
| 696 |
"""simple docstring"""
def kinetic_energy( mass , velocity ):
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
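# A worked example (not from the original file): a 10 kg body at 10 m/s carries
# 0.5 * 10 * 10**2 = 500 J; the speed is squared, so the sign of velocity drops out.
assert kinetic_energy(10, 10) == 500.0
assert kinetic_energy(2, -4) == 16.0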
| 696 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : int = logging.get_logger(__name__)
A_ : List[Any] = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class RobertaPreLayerNormConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = 'roberta-prelayernorm'
    def __init__(self, vocab_size=5_0_2_6_5, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2, intermediate_size=3_0_7_2, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig ):
    '''simple docstring'''

    @property
    def inputs(self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
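# A minimal sketch (not from the original file): the mapping returned by
# `inputs` tells the ONNX exporter which tensor axes stay symbolic (dynamic),
# here the batch and sequence dimensions of both inputs.
_axes = {0: 'batch', 1: 'sequence'}
assert list(OrderedDict([('input_ids', _axes), ('attention_mask', _axes)])) == ['input_ids', 'attention_mask']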
| 696 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
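# A minimal sketch (not from the original file; assumes a transformers install)
# of the `update_from_string` behavior exercised above: the string is parsed as
# comma-separated key=value pairs, with each value cast to the attribute's type.
def _demo_update_from_string():
    from transformers import GPT2Config

    c = GPT2Config()
    c.update_from_string(f"n_embd={c.n_embd + 1},summary_type={c.summary_type}foo")
    assert c.summary_type.endswith("foo")
    return c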
| 696 | 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm( main_process_only = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
    disable = False
    if main_process_only:
        # disable the bar everywhere except on the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
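# A usage sketch (not from the original file): with this signature the flag
# comes first, so pass it positionally before the iterable.
def _demo_progress():
    for _ in tqdm(True, range(5), desc='steps'):
        pass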
| 696 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline ):
    '''simple docstring'''

    def __init__(self, value_function, unet, scheduler, env, ):
        '''simple docstring'''
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key ):
        '''simple docstring'''
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key ):
        '''simple docstring'''
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in ):
        '''simple docstring'''
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in, device=self.unet.device )

    def reset_x0(self, x_in, cond, act_dim ):
        '''simple docstring'''
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale ):
        '''simple docstring'''
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1 ), timesteps ).sample
                    grad = torch.autograd.grad([y.sum()], [x] )[0]

                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim )
            prev_x = self.unet(x.permute(0, 2, 1 ), timesteps ).sample.permute(0, 2, 1 )

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False )['prev_sample']

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim )
            x = self.to_torch(x )
        return x, y

    def __call__(self, obs, batch_size=6_4, planning_horizon=3_2, n_guide_steps=2, scale=0.1 ):
        '''simple docstring'''
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, 'observations' )
        obs = obs[None].repeat(batch_size, axis=0 )

        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x = randn_tensor(shape, device=self.unet.device )
        x = self.reset_x0(x, conditions, self.action_dim )
        x = self.to_torch(x )

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale )

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key='actions' )

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size )

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
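# A minimal sketch (not from the original file) of the normalize/de_normalize
# pair above: z = (x - mean) / std round-trips exactly.
_means, _stds = {'observations': 2.0}, {'observations': 4.0}
_x = np.array([2.0, 6.0, 10.0])
_z = (_x - _means['observations']) / _stds['observations']
assert np.allclose(_z * _stds['observations'] + _means['observations'], _x)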
| 696 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
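# A minimal stand-in (not from the original file) for the lazy-module pattern
# above: the import itself is cheap, and the real module only loads when an
# attribute is first accessed.
import importlib

class _LazyNamespace:
    def __init__(self, module_name):
        self._name, self._mod = module_name, None

    def __getattr__(self, attr):
        if self._mod is None:
            self._mod = importlib.import_module(self._name)  # deferred import
        return getattr(self._mod, attr)

_json_ns = _LazyNamespace("json")            # nothing imported yet
assert _json_ns.loads("[1, 2]") == [1, 2]    # the import happens here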
| 696 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text ):
    single_char_strings, two_char_strings = analyze_text(text )
    my_alphas = list(' ' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.

    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )

    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )

    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )

    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text( text ):
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main():
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
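# A worked check (not from the original file): for the text "ab", analyze_text
# counts {'a': 1, 'b': 1}, each probability is 1/2, and the single-character
# entropy is -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit.
assert -sum(p * math.log2(p) for p in (0.5, 0.5)) == 1.0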
| 696 | 1 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance( x , y , max_step ):
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb( distance ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb( distance ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
    img = Image.new('RGB' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x , figure_y , max_step )

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
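# Sanity checks (not from the original file) for get_distance: the origin never
# diverges (distance 1.0), while a point far outside the set escapes on step 0.
assert get_distance(0, 0, 50) == 1.0
assert get_distance(2, 2, 50) == 0.0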
| 696 |
"""simple docstring"""
import os
def solution():
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"', '' ).split(',' )

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
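# A worked example from the Project Euler 22 statement (not from the file):
# COLIN has alphabetical value 3 + 15 + 12 + 9 + 14 = 53, and at position 938
# in the sorted list it scores 938 * 53 = 49714.
assert sum(ord(c) - 64 for c in 'COLIN') == 53
assert 938 * 53 == 49_714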
| 696 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, is_keras_nlp_available, is_tf_available
from transformers import TFGPT2LMHeadModel as TFGPTaLMHeadModel  # real class, aliased to the name used below
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer as GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer as TFGPTaTokenizer
A_ : Any = ["gpt2"]
A_ : Any = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module ):
        '''simple docstring'''

        def __init__(self, tokenizer ):
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPTaLMHeadModel.from_config(config )
@tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='text' ),) )
        def serving(self, text ):
            '''simple docstring'''
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized['input_ids'].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32 )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask )['logits']
            return outputs
@require_tf
@require_keras_nlp
class a_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self ):
'''simple docstring'''
super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1] ) )
    def test_tokenization(self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers ):
for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='tf' )
                tf_outputs = tf_tokenizer([test_inputs] )

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64 ) == tf_outputs_values ) )
@slow
    def test_graph_mode(self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_saved_model(self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                tf.saved_model.save(model, save_path, signatures={'serving_default': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures['serving_default'](test_inputs )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
    def test_from_config(self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
    def test_padding(self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.pad_token_id = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs, max_length=max_length )
                out_length = out['input_ids'].numpy().shape[1]
assert out_length == max_length
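# A minimal sketch (not from the original tests; assumes transformers,
# keras-nlp, TF, and network access): in-graph tokenization in two lines.
def _demo_in_graph_tokenization():
    tok = TFGPTaTokenizer.from_pretrained('gpt2')
    return tok(tf.constant(['hello world']))['input_ids']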
| 696 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
| 696 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput ):
    '''simple docstring'''

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig ):
    '''simple docstring'''

    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=5_1_2, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel ):
    '''simple docstring'''

    _keys_to_ignore_on_load_unexpected = [R'pooler', R'logit_scale']
    _keys_to_ignore_on_load_missing = [R'position_ids', R'predictions.decoder.bias']
    base_model_prefix = 'roberta'
    config_class = RobertaSeriesConfig

    def __init__(self, config ):
        '''simple docstring'''
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size, config.project_dim )
        self.has_pre_transformation = getattr(config, 'has_pre_transformation', False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps )
        self.post_init()
    def forward(self, input_ids = None, attention_mask = None, token_type_ids = None, position_ids = None, head_mask = None, inputs_embeds = None, encoder_hidden_states = None, encoder_attention_mask = None, output_attentions = None, output_hidden_states = None, return_dict = None, ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )

        if self.has_pre_transformation:
            sequence_output2 = outputs['hidden_states'][-2]
            sequence_output2 = self.pre_LN(sequence_output2 )
            projection_state2 = self.transformation_pre(sequence_output2 )

            return TransformationModelOutput(
                projection_state=projection_state2, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
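# A minimal sketch (not from the original file) of the projection head above:
# a Linear maps hidden_size -> project_dim at every token position.
_proj = nn.Linear(7_6_8, 5_1_2)
assert _proj(torch.zeros(1, 4, 7_6_8)).shape == (1, 4, 5_1_2)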
| 696 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    '''simple docstring'''

    def __init__(self, parent, batch_size=1_3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=9_9, hidden_size=6_4, embedding_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self ):
        '''simple docstring'''
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
        # MobileBERT activations span many orders of magnitude (roughly 1e0 to 1e8). Even a
        # 0.0000001% relative difference on a value of order 1e8 gives an absolute difference
        # of ~1, so an additive tolerance is not a good way to compare.
        # Here, we instead divide the expected result by the actual result to obtain a ratio
        # of ~1, and check that the ratio stays within bounds:
        # 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 696 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
A_ : int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['pixel_values']
def __init__(self, lowerCamelCase_ = True, lowerCamelCase_ = None, lowerCamelCase_ = PILImageResampling.BICUBIC, lowerCamelCase_ = True, lowerCamelCase_ = None, lowerCamelCase_ = True, lowerCamelCase_ = 1 / 2_5_5, lowerCamelCase_ = True, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Tuple = size if size is not None else {'shortest_edge': 2_2_4}
lowerCamelCase__ : Union[str, Any] = get_size_dict(lowerCamelCase_, default_to_square=lowerCamelCase_ )
lowerCamelCase__ : Dict = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCamelCase__ : List[str] = get_size_dict(lowerCamelCase_, default_to_square=lowerCamelCase_, param_name='crop_size' )
lowerCamelCase__ : Optional[int] = do_resize
lowerCamelCase__ : Optional[int] = size
lowerCamelCase__ : Dict = resample
lowerCamelCase__ : Dict = do_center_crop
lowerCamelCase__ : Tuple = crop_size
lowerCamelCase__ : Dict = do_rescale
lowerCamelCase__ : str = rescale_factor
lowerCamelCase__ : str = do_normalize
lowerCamelCase__ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase__ : int = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase__ : Tuple = do_convert_rgb
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = PILImageResampling.BICUBIC, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = get_size_dict(lowerCamelCase_, default_to_square=lowerCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowerCamelCase__ : Union[str, Any] = get_resize_output_image_size(lowerCamelCase_, size=size['shortest_edge'], default_to_square=lowerCamelCase_ )
return resize(lowerCamelCase_, size=lowerCamelCase_, resample=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCamelCase_, size=(size['height'], size['width']), data_format=lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
return rescale(lowerCamelCase_, scale=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
return normalize(lowerCamelCase_, mean=lowerCamelCase_, std=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = ChannelDimension.FIRST, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : str = size if size is not None else self.size
lowerCamelCase__ : str = get_size_dict(lowerCamelCase_, param_name='size', default_to_square=lowerCamelCase_ )
lowerCamelCase__ : Dict = resample if resample is not None else self.resample
lowerCamelCase__ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
lowerCamelCase__ : Dict = get_size_dict(lowerCamelCase_, param_name='crop_size', default_to_square=lowerCamelCase_ )
lowerCamelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : List[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : int = image_std if image_std is not None else self.image_std
lowerCamelCase__ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase__ : Optional[Any] = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase__ : List[str] = [convert_to_rgb(lowerCamelCase_ ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase__ : str = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
lowerCamelCase__ : str = [self.resize(image=lowerCamelCase_, size=lowerCamelCase_, resample=lowerCamelCase_ ) for image in images]
if do_center_crop:
lowerCamelCase__ : Optional[int] = [self.center_crop(image=lowerCamelCase_, size=lowerCamelCase_ ) for image in images]
if do_rescale:
lowerCamelCase__ : Optional[int] = [self.rescale(image=lowerCamelCase_, scale=lowerCamelCase_ ) for image in images]
if do_normalize:
lowerCamelCase__ : List[str] = [self.normalize(image=lowerCamelCase_, mean=lowerCamelCase_, std=lowerCamelCase_ ) for image in images]
lowerCamelCase__ : Optional[int] = [to_channel_dimension_format(lowerCamelCase_, lowerCamelCase_ ) for image in images]
lowerCamelCase__ : Optional[int] = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase_, tensor_type=lowerCamelCase_ )
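# Illustrative usage sketch (hypothetical; `a_` is this file's placeholder class name):
#   processor = a_()                                    # CLIP-style defaults: resize + center crop to 224
#   batch = processor(images=[pil_image], return_tensors="np")
#   batch["pixel_values"].shape                         # -> (1, 3, 224, 224)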
| 696 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented-out code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
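        # pad generations to a common length first: accelerator.gather below requires
        # tensors of identical shape on every process before concatenating them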
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
    # Set up the configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
    # do not confuse args.batch_size with the dataloader batch size: it is actually num_return_sequences
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False ):
lowerCamelCase__ : Optional[int] = 'backbone.' if is_semantic else ''
lowerCamelCase__ : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(f'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(f'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(f'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
lowerCamelCase__ : Tuple = 'backbone.' if is_semantic else ''
# queries, keys and values
lowerCamelCase__ : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
lowerCamelCase__ : str = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
lowerCamelCase__ : Dict = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
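        # the checkpoint stores a fused qkv projection; below it is split row-wise into
        # query (first hidden_size rows), key (middle rows) and value (last rows).
        # note that the original checkpoint carries no bias for the key projection.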
lowerCamelCase__ : List[str] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase__ : int = q_bias
lowerCamelCase__ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowerCamelCase__ : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
lowerCamelCase__ : int = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
lowerCamelCase__ : Union[str, Any] = gamma_a
lowerCamelCase__ : Union[str, Any] = gamma_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Tuple = dct.pop(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : List[Any] = val
def lowerCamelCase_ ( ):
lowerCamelCase__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : List[str] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
lowerCamelCase__ : Dict = False if 'rvlcdip' in checkpoint_url else True
lowerCamelCase__ : List[str] = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE_ , use_mask_token=SCREAMING_SNAKE_CASE_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowerCamelCase__ : Optional[int] = 1024
lowerCamelCase__ : Optional[int] = 4096
lowerCamelCase__ : Any = 24
lowerCamelCase__ : Dict = 16
# labels
if "rvlcdip" in checkpoint_url:
lowerCamelCase__ : Tuple = 16
lowerCamelCase__ : Union[str, Any] = 'huggingface/label-files'
lowerCamelCase__ : Union[str, Any] = 'rvlcdip-id2label.json'
lowerCamelCase__ : str = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Optional[Any] = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowerCamelCase__ : Dict = idalabel
lowerCamelCase__ : Any = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowerCamelCase__ : str = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['model']
lowerCamelCase__ : Optional[int] = create_rename_keys(SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
lowerCamelCase__ : Tuple = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE_ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image
lowerCamelCase__ : List[str] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : Optional[Any] = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase__ : Optional[Any] = encoding['pixel_values']
lowerCamelCase__ : Any = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : Tuple = outputs.logits
# verify logits
lowerCamelCase__ : Optional[Any] = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE_ ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
if has_lm_head:
lowerCamelCase__ : Optional[int] = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
lowerCamelCase__ : Optional[int] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
A_ : Any = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 700 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
| 696 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Tuple = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class a_ ( _snake_case ):
'''simple docstring'''
lowerCamelCase__ : str = 'roformer'
def __init__(self, lowerCamelCase_=5_0_0_0_0, lowerCamelCase_=None, lowerCamelCase_=7_6_8, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_5_3_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=0, lowerCamelCase_=False, lowerCamelCase_=True, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__, **lowerCAmelCase__ )
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : Optional[Any] = hidden_size if embedding_size is None else embedding_size
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Union[str, Any] = num_attention_heads
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : List[str] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = max_position_embeddings
lowerCamelCase__ : List[Any] = type_vocab_size
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : Any = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = rotary_value
lowerCamelCase__ : Dict = use_cache
class a_ ( _snake_case ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase__ : Dict = {0: 'batch', 1: 'sequence'}
lowerCamelCase__ : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
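# The dynamic axes above tell ONNX export which input dimensions may vary at runtime
# (batch and sequence length, plus the choice axis for the multiple-choice task);
# all remaining dimensions are fixed at export time.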
| 701 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
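# Worked example: solution(10) == 27, since 10! = 3628800 and 3+6+2+8+8+0+0 = 27;
# with the default num = 100 the digit sum of 100! is 648.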
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 | 0 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case__ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Optional[Any] = value_function
lowerCamelCase__ : Any = unet
lowerCamelCase__ : Optional[Any] = scheduler
lowerCamelCase__ : Union[str, Any] = env
lowerCamelCase__ : Any = env.get_dataset()
lowerCamelCase__ : Tuple = {}
for key in self.data.keys():
try:
lowerCamelCase__ : List[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : List[Any] = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : List[str] = env.observation_space.shape[0]
lowerCamelCase__ : Union[str, Any] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(UpperCAmelCase_ ) is dict:
return {k: self.to_torch(UpperCAmelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(UpperCAmelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(UpperCAmelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Any = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = x.shape[0]
lowerCamelCase__ : Dict = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : int = torch.full((batch_size,), UpperCAmelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(UpperCAmelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : Any = self.value_function(x.permute(0, 2, 1 ), UpperCAmelCase_ ).sample
lowerCamelCase__ : List[str] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : str = self.scheduler._get_variance(UpperCAmelCase_ )
lowerCamelCase__ : str = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : int = model_std * grad
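          # value guidance: the gradient of the predicted return is scaled by the
          # posterior std and added to the (detached) trajectory a few lines below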
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : List[Any] = x.detach()
lowerCamelCase__ : Any = x + scale * grad
lowerCamelCase__ : Optional[Any] = self.reset_xa(UpperCAmelCase_, UpperCAmelCase_, self.action_dim )
lowerCamelCase__ : Dict = self.unet(x.permute(0, 2, 1 ), UpperCAmelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : List[str] = self.scheduler.step(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, predict_epsilon=UpperCAmelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(UpperCAmelCase_, UpperCAmelCase_, self.action_dim )
lowerCamelCase__ : List[Any] = self.to_torch(UpperCAmelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.normalize(UpperCAmelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(UpperCAmelCase_, axis=0 )
lowerCamelCase__ : Union[str, Any] = {0: self.to_torch(UpperCAmelCase_ )}
lowerCamelCase__ : str = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : Optional[int] = randn_tensor(UpperCAmelCase_, device=self.unet.device )
lowerCamelCase__ : Any = self.reset_xa(UpperCAmelCase_, UpperCAmelCase_, self.action_dim )
lowerCamelCase__ : Optional[int] = self.to_torch(UpperCAmelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.run_diffusion(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
# sort output trajectories by value
lowerCamelCase__ : List[Any] = y.argsort(0, descending=UpperCAmelCase_ ).squeeze()
lowerCamelCase__ : Optional[Any] = x[sorted_idx]
lowerCamelCase__ : List[str] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : List[Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : int = self.de_normalize(UpperCAmelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : Any = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Union[str, Any] = np.random.randint(0, UpperCAmelCase_ )
lowerCamelCase__ : Optional[Any] = denorm_actions[selected_index, 0]
return denorm_actions
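# Illustrative usage sketch: calling the pipeline with the current observation,
# e.g. pipeline(obs, batch_size=64), samples candidate trajectories, ranks them by
# predicted value, and returns the first action of the best one in environment units.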
| 702 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
A_ : Optional[Any] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 703 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius
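# Worked check: surface_area_torus(20, 10) = 4 * pi^2 * 20 * 10 ≈ 7895.68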
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
"""simple docstring"""
def check_bipartite_dfs(graph):
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
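# Negative example (illustrative): a triangle is an odd cycle and therefore
# not bipartite, so this prints False.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))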
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append to next input_ids and attention mask
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
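# Minimal usage sketch outside the test suite (assumes Hub access; it mirrors
# the integration test above, which uses exactly these APIs):
#   tokenizer = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped')
#   model = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped')
#   inputs = tokenizer('My favorite food is', return_tensors='pt')
#   ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
#   print(tokenizer.batch_decode(ids)[0])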
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : str = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class a_ ( PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'sew-d'
def __init__(self, lowerCamelCase_=3_2, lowerCamelCase_=7_6_8, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_=2, lowerCamelCase_=5_1_2, lowerCamelCase_=2_5_6, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=("p2c", "c2p"), lowerCamelCase_="layer_norm", lowerCamelCase_="gelu_python", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=0.0, lowerCamelCase_=0.1, lowerCamelCase_=0.02, lowerCamelCase_=1e-7, lowerCamelCase_=1e-5, lowerCamelCase_="group", lowerCamelCase_="gelu", lowerCamelCase_=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2), lowerCamelCase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), lowerCamelCase_=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), lowerCamelCase_=False, lowerCamelCase_=1_2_8, lowerCamelCase_=1_6, lowerCamelCase_=True, lowerCamelCase_=0.05, lowerCamelCase_=1_0, lowerCamelCase_=2, lowerCamelCase_=0.0, lowerCamelCase_=1_0, lowerCamelCase_=0, lowerCamelCase_="mean", lowerCamelCase_=False, lowerCamelCase_=False, lowerCamelCase_=2_5_6, lowerCamelCase_=0, lowerCamelCase_=1, lowerCamelCase_=2, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**__lowerCamelCase, pad_token_id=__lowerCamelCase, bos_token_id=__lowerCamelCase, eos_token_id=__lowerCamelCase )
lowerCamelCase__ : Any = hidden_size
lowerCamelCase__ : Union[str, Any] = feat_extract_norm
lowerCamelCase__ : List[str] = feat_extract_activation
lowerCamelCase__ : Any = list(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = list(__lowerCamelCase )
lowerCamelCase__ : int = list(__lowerCamelCase )
lowerCamelCase__ : List[str] = conv_bias
lowerCamelCase__ : Union[str, Any] = num_conv_pos_embeddings
lowerCamelCase__ : int = num_conv_pos_embedding_groups
lowerCamelCase__ : Union[str, Any] = len(self.conv_dim )
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Optional[int] = squeeze_factor
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : List[str] = position_buckets
lowerCamelCase__ : List[str] = share_att_key
lowerCamelCase__ : int = relative_attention
lowerCamelCase__ : Dict = norm_rel_ebd
lowerCamelCase__ : Optional[int] = list(__lowerCamelCase )
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : List[str] = num_attention_heads
lowerCamelCase__ : Any = hidden_dropout
lowerCamelCase__ : List[Any] = attention_dropout
lowerCamelCase__ : Any = activation_dropout
lowerCamelCase__ : List[str] = feat_proj_dropout
lowerCamelCase__ : int = final_dropout
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : List[Any] = feature_layer_norm_eps
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : List[str] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__ : List[str] = apply_spec_augment
lowerCamelCase__ : Optional[int] = mask_time_prob
lowerCamelCase__ : Optional[Any] = mask_time_length
lowerCamelCase__ : Union[str, Any] = mask_time_min_masks
lowerCamelCase__ : List[Any] = mask_feature_prob
lowerCamelCase__ : Optional[Any] = mask_feature_length
lowerCamelCase__ : Any = mask_feature_min_masks
# ctc loss
lowerCamelCase__ : Dict = ctc_loss_reduction
lowerCamelCase__ : List[Any] = ctc_zero_infinity
# sequence classification
lowerCamelCase__ : Optional[Any] = use_weighted_layer_sum
lowerCamelCase__ : Union[str, Any] = classifier_proj_size
@property
def a__ (self ):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1 )
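# Usage sketch (assuming this config is exported as `SEWDConfig` and the
# property above as `inputs_to_logits_ratio` in `transformers`): the property
# multiplies the conv strides, so with the default strides
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) each output frame spans
# 5 * 2**6 = 320 input samples:
#   config = SEWDConfig()
#   assert config.inputs_to_logits_ratio == 320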
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name , ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint ) )
        raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
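    # Quick illustration of the checkpoint regex used above:
    #   >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
    #   [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]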
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers = spec.loader.load_module()
def _should_continue(line , indent):
    return line.startswith(indent ) or len(line ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , line ) is not None
def find_code_in_diffusers(object_name):
    parts = object_name.split('.' )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f'''{module}.py''' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
    with open(os.path.join(DIFFUSERS_PATH , f'''{module}.py''' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(rf'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f''' {object_name} does not match any function or class in {module}.''' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split('\n' )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f'''class Bla:\n{code}'''
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len('class Bla:\n' ) :] if has_indent else result
def is_copy_consistent(filename , overwrite=False):
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(f'''^{indent}# End copy''' , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = ''.join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(line ) is None]
        theoretical_code = '\n'.join(theoretical_code )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace('with' , '' ).split(',' )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja , objb , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower() , objb.lower() , theoretical_code )
                    theoretical_code = re.sub(obja.upper() , objb.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f'''Detected changes, rewriting {filename}.''' )
        with open(filename , 'w' , encoding='utf-8' , newline='\n' ) as f:
            f.writelines(lines )
    return diffs
def check_copies(overwrite = False):
    all_files = glob.glob(os.path.join(REPO_PATH , '**/*.py' ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = '\n'.join(diffs )
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
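    # Example of the convention this script enforces (illustrative only): a
    # class or function marked as copied must stay identical to its source
    # after applying the optional `with old->new` renames, e.g.
    #   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
    # `_re_copy_warning` captures the indent, the dotted source path and the
    # trailing replacement pattern from such comments.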
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
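# Usage sketch: thanks to the `_LazyModule` indirection above, these imports
# resolve the heavy submodules only on first attribute access (assuming torch
# and sentencepiece are installed):
#   from transformers import LlamaConfig, LlamaTokenizer, LlamaForCausalLM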
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ : List[Any] = logging.get_logger(__name__)
A_ : Optional[int] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
A_ : int = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
A_ : List[str] = {"facebook/blenderbot-3B": 1_28}
class a_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Union[str, Any] = ['input_ids', 'attention_mask']
lowerCamelCase__ : Optional[Any] = BlenderbotTokenizer
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_="replace", lowerCamelCase_="<s>", lowerCamelCase_="</s>", lowerCamelCase_="</s>", lowerCamelCase_="<s>", lowerCamelCase_="<unk>", lowerCamelCase_="<pad>", lowerCamelCase_="<mask>", lowerCamelCase_=False, lowerCamelCase_=True, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
_lowercase, _lowercase, tokenizer_file=_lowercase, errors=_lowercase, bos_token=_lowercase, eos_token=_lowercase, sep_token=_lowercase, cls_token=_lowercase, unk_token=_lowercase, pad_token=_lowercase, mask_token=_lowercase, add_prefix_space=_lowercase, trim_offsets=_lowercase, **_lowercase, )
lowerCamelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', _lowercase ) != add_prefix_space:
lowerCamelCase__ : str = getattr(_lowercase, pre_tok_state.pop('type' ) )
lowerCamelCase__ : str = add_prefix_space
lowerCamelCase__ : Any = pre_tok_class(**_lowercase )
lowerCamelCase__ : List[Any] = add_prefix_space
lowerCamelCase__ : Optional[int] = 'post_processor'
lowerCamelCase__ : Optional[int] = getattr(self.backend_tokenizer, _lowercase, _lowercase )
if tokenizer_component_instance:
lowerCamelCase__ : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCamelCase__ : Dict = tuple(state['sep'] )
if "cls" in state:
lowerCamelCase__ : Dict = tuple(state['cls'] )
lowerCamelCase__ : Optional[int] = False
if state.get('add_prefix_space', _lowercase ) != add_prefix_space:
lowerCamelCase__ : int = add_prefix_space
lowerCamelCase__ : str = True
if state.get('trim_offsets', _lowercase ) != trim_offsets:
lowerCamelCase__ : Dict = trim_offsets
lowerCamelCase__ : Any = True
if changes_to_apply:
lowerCamelCase__ : Optional[Any] = getattr(_lowercase, state.pop('type' ) )
lowerCamelCase__ : Tuple = component_class(**_lowercase )
setattr(self.backend_tokenizer, _lowercase, _lowercase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token (self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token (self, value ):
'''simple docstring'''
lowerCamelCase__ : Dict = AddedToken(_lowercase, lstrip=_lowercase, rstrip=_lowercase ) if isinstance(_lowercase, _lowercase ) else value
lowerCamelCase__ : Any = value
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = kwargs.get('is_split_into_words', _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowercase, **_lowercase )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = kwargs.get('is_split_into_words', _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowercase, **_lowercase )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self._tokenizer.model.save(_lowercase, name=_lowercase )
return tuple(_lowercase )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Tuple = [self.sep_token_id]
lowerCamelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(_lowercase )
lowerCamelCase__ : Optional[Any] = ' '.join(_lowercase )
lowerCamelCase__ : Optional[int] = self.encode(_lowercase )
if len(_lowercase ) > self.model_max_length:
lowerCamelCase__ : Dict = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
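# Usage sketch (assuming the standard `transformers` export): Blenderbot only
# appends the EOS token instead of wrapping inputs in BOS/EOS, as the
# `build_inputs_with_special_tokens` override above shows:
#   tok = BlenderbotTokenizerFast.from_pretrained('facebook/blenderbot-3B')
#   assert tok(' Hello')['input_ids'][-1] == tok.eos_token_id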
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(1_00_00):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
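    # A more defensive variant of the request above (sketch, not a drop-in
    # replacement): send the canonical User-Agent header, fail fast on HTTP
    # errors and bound the wait with a timeout.
    #   res = requests.get(url, headers={"User-Agent": UserAgent().random}, timeout=10)
    #   res.raise_for_status()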
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Dict = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class a_ ( PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'informer'
lowerCamelCase__ : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__(self, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = "student_t", lowerCamelCase_ = "nll", lowerCamelCase_ = 1, lowerCamelCase_ = None, lowerCamelCase_ = "mean", lowerCamelCase_ = 0, lowerCamelCase_ = 0, lowerCamelCase_ = 0, lowerCamelCase_ = 0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = 6_4, lowerCamelCase_ = 3_2, lowerCamelCase_ = 3_2, lowerCamelCase_ = 2, lowerCamelCase_ = 2, lowerCamelCase_ = 2, lowerCamelCase_ = 2, lowerCamelCase_ = True, lowerCamelCase_ = "gelu", lowerCamelCase_ = 0.05, lowerCamelCase_ = 0.1, lowerCamelCase_ = 0.1, lowerCamelCase_ = 0.1, lowerCamelCase_ = 0.1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = 0.02, lowerCamelCase_=True, lowerCamelCase_ = "prob", lowerCamelCase_ = 5, lowerCamelCase_ = True, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = prediction_length
lowerCamelCase__ : Union[str, Any] = context_length or prediction_length
lowerCamelCase__ : Optional[Any] = distribution_output
lowerCamelCase__ : int = loss
lowerCamelCase__ : str = input_size
lowerCamelCase__ : Optional[Any] = num_time_features
lowerCamelCase__ : Union[str, Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCamelCase__ : Any = scaling
lowerCamelCase__ : Any = num_dynamic_real_features
lowerCamelCase__ : Any = num_static_real_features
lowerCamelCase__ : Any = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
lowerCamelCase__ : Dict = cardinality
else:
lowerCamelCase__ : Tuple = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
lowerCamelCase__ : str = embedding_dimension
else:
lowerCamelCase__ : Optional[Any] = [min(5_0, (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase__ : str = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase__ : List[Any] = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCamelCase__ : List[Any] = d_model
lowerCamelCase__ : Optional[Any] = encoder_attention_heads
lowerCamelCase__ : Any = decoder_attention_heads
lowerCamelCase__ : Any = encoder_ffn_dim
lowerCamelCase__ : Dict = decoder_ffn_dim
lowerCamelCase__ : Optional[Any] = encoder_layers
lowerCamelCase__ : Union[str, Any] = decoder_layers
lowerCamelCase__ : str = dropout
lowerCamelCase__ : Tuple = attention_dropout
lowerCamelCase__ : Optional[int] = activation_dropout
lowerCamelCase__ : Tuple = encoder_layerdrop
lowerCamelCase__ : Any = decoder_layerdrop
lowerCamelCase__ : str = activation_function
lowerCamelCase__ : Any = init_std
lowerCamelCase__ : Tuple = use_cache
# Informer
lowerCamelCase__ : Optional[int] = attention_type
lowerCamelCase__ : Union[str, Any] = sampling_factor
lowerCamelCase__ : Tuple = distil
super().__init__(is_encoder_decoder=lowerCamelCase_, **lowerCamelCase_ )
@property
    def _number_of_features (self ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
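# Worked example of the bookkeeping above (assuming the class is exported as
# `InformerConfig`): with the defaults (input_size=1, no time/static/dynamic
# features, num_static_categorical_features=0 so sum(embedding_dimension)=0),
# `_number_of_features` is 0 + 0 + 0 + 0 + 1 * 2 = 2, and the encoder input
# dimension is input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.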
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
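# The data-parallel pattern used throughout these tests, in brief (sketch):
# replicate the params across devices, derive one PRNG key per device, and
# shard the tokenized prompts along the leading axis before the jitted call:
#   params = replicate(params)
#   rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   prompt_ids = shard(pipeline.prepare_inputs(num_samples * [prompt]))
#   images = pipeline(prompt_ids, params, rngs, jit=True).images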
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir):
    lock_a = FileLock(str(tmpdir / 'foo.lock' ) )
    lock_b = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock_b.acquire(timeout )
        assert time.time() - _start > timeout
def test_filelock_long_filename(tmpdir):
    filename = 'a' * 1000 + '.lock'
    lock_a = FileLock(str(tmpdir / filename ) )
    assert lock_a._lock_file.endswith('.lock' )
    assert not lock_a._lock_file.endswith(filename )
    assert len(os.path.basename(lock_a._lock_file ) ) <= 255
    lock_b = FileLock(tmpdir / filename )
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            lock_b.acquire(0 )
| 709 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
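# Minimal usage sketch for the unconditional audio pipeline above; the checkpoint
# name is illustrative and any UNet + scheduler pair saved in diffusers format works:
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
#   output = pipe(batch_size=1, num_inference_steps=1_0_0, audio_length_in_s=4.096 )
#   waveform = output.audios[0]  # numpy array, shape (channels, samples)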
| 696 | 0 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
A_ : int = TypeVar("_T")
class a_ ( Generic[_T] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : list[_T] = list(iterable or [] )
lowerCamelCase__ : list[_T] = []
def __len__(self ):
'''simple docstring'''
return len(self._stacka ) + len(self._stacka )
def __repr__(self ):
'''simple docstring'''
return f'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
        self._stacka.append(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self._stacka.pop
lowerCamelCase__ : Union[str, Any] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('Queue is empty' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
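# Worked trace of the two-stack queue above: pushes land on the input stack and a
# pop drains it into the output stack (reversing order) only when the latter is
# empty, so each element moves at most once -> amortized O(1) per operation.
#
#   enqueue 1, 2, 3      stack_in = [1, 2, 3], stack_out = []
#   dequeue -> 1         stack_in = [],        stack_out = [3, 2]
#   dequeue -> 2         stack_in = [],        stack_out = [3]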
| 710 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
| 696 | 0 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
lowerCamelCase__ : List[str] = flax_key_tuple[:-1] + ("weight",)
lowerCamelCase__ : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
lowerCamelCase__ : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
lowerCamelCase__ : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase__ : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if "metadata" in layer:
lowerCamelCase__ : Optional[int] = layer.split('metadata' )
lowerCamelCase__ : Union[str, Any] = "".join(split_layer[0] )[:-1]
lowerCamelCase__ : Dict = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
lowerCamelCase__ : List[str] = layer.split('kvstore' )
lowerCamelCase__ : Optional[int] = "".join(split_layer[0] )[:-1]
lowerCamelCase__ : List[Any] = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
lowerCamelCase__ : Tuple = layer.split('/' )
lowerCamelCase__ : int = "/".join(split_layer[:-1] )
lowerCamelCase__ : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
lowerCamelCase__ : int = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
lowerCamelCase__ : Optional[int] = "file"
else:
lowerCamelCase__ : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
lowerCamelCase__ : str = rename_keys(_lowerCAmelCase )
lowerCamelCase__ : Dict = {}
for k, v in current_block.items():
lowerCamelCase__ : Union[str, Any] = v
lowerCamelCase__ : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ):
'''simple docstring'''
lowerCamelCase__ : Dict = convert_file_size_to_int(_lowerCAmelCase )
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : Dict = {}
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
lowerCamelCase__ : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
lowerCamelCase__ : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep='/' )
lowerCamelCase__ : Optional[int] = {}
for layer in checkpoint_info.keys():
lowerCamelCase__ : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
lowerCamelCase__ : Optional[int] = content
else:
lowerCamelCase__ : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowerCamelCase__ : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
lowerCamelCase__ : int = torch.tensor(_lowerCAmelCase )
lowerCamelCase__ : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
lowerCamelCase__ : Optional[Any] = rename_base_flax_keys(tuple(key.split('/' ) ) , _lowerCAmelCase )
lowerCamelCase__ : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
lowerCamelCase__ : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace('.bin' , f'''-{len(_lowerCAmelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
lowerCamelCase__ : Optional[int] = {}
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowerCamelCase__ : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace('.bin' , f'''-{len(_lowerCAmelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
lowerCamelCase__ : Dict = weights_name.replace(
'.bin' , f'''-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin''' ) # len(sharded_state_dicts):05d}
lowerCamelCase__ : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
lowerCamelCase__ : Tuple = shard
for key in shard:
lowerCamelCase__ : str = shard_file
# Add the metadata
lowerCamelCase__ : Optional[Any] = {"total_size": total_size}
lowerCamelCase__ : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , 'w' , encoding='utf-8' ) as f:
lowerCamelCase__ : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
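# Illustrative shape of the artifacts written above: each shard is a renamed
# state dict, and the index maps every parameter name to its shard file, e.g.
#   {"metadata": {"total_size": 123456},
#    "weight_map": {"shared.weight": "pytorch_model-00001-of-00002.bin", ...}}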
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
A_ : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowerCamelCase_ ( ):
'''simple docstring'''
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
lowerCamelCase__ : Optional[int] = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
lowerCamelCase__ : int = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
lowerCamelCase__ : List[str] = TaTokenizer.from_pretrained('t5-small' )
lowerCamelCase__ : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
lowerCamelCase__ : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors='pt' ).input_ids
lowerCamelCase__ : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
A_ : Dict = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = Github(os.environ['GITHUB_TOKEN'] )
lowerCamelCase__ : str = g.get_repo('huggingface/transformers' )
lowerCamelCase__ : List[str] = repo.get_issues(state='open' )
for issue in open_issues:
        lowerCamelCase__ : Union[str, Any] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        lowerCamelCase__ : Union[str, Any] = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
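# Expected invocation, e.g. from a scheduled CI job (the script path and token
# scope are assumptions):
#   GITHUB_TOKEN=<token with repo scope> python scripts/stale.py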
| 712 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 696 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[int] = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
        'training_script' , type=str , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
return parser.parse_args()
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[Any] = parse_args()
# Import training_script as a module.
lowerCamelCase__ : Any = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowerCamelCase__ : str = script_fpath.stem
    lowerCamelCase__ : Union[str, Any] = importlib.import_module(mod_name )
# Patch sys.argv
lowerCamelCase__ : List[Any] = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
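# Example invocation (script name and training arguments are illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train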
| 713 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
if group not in primes:
raise ValueError('Unsupported Group' )
lowerCamelCase__ : int = primes[group]['prime']
lowerCamelCase__ : Optional[int] = primes[group]['generator']
lowerCamelCase__ : Any = int(hexlify(urandom(3_2 ) ), base=1_6 )
def a__ (self ):
'''simple docstring'''
return hex(self.__private_key )[2:]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = pow(self.generator, self.__private_key, self.prime )
return hex(lowerCamelCase_ )[2:]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase_, (self.prime - 1) // 2, self.prime ) == 1
)
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = int(lowerCamelCase_, base=1_6 )
if not self.is_valid_public_key(lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Tuple = pow(lowerCamelCase_, self.__private_key, self.prime )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase_, (prime - 1) // 2, lowerCamelCase_ ) == 1
)
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
lowerCamelCase__ : Dict = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[Any] = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[str] = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase_, lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Dict = pow(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
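# Key-exchange sketch with the class above; the descriptive method names are an
# assumption, since the generate/shared accessors are all obfuscated to `a__`:
#   alice, bob = DiffieHellman(group=1_4 ), DiffieHellman(group=1_4 )
#   shared_a = alice.generate_shared_key(bob.generate_public_key() )
#   shared_b = bob.generate_shared_key(alice.generate_public_key() )
#   assert shared_a == shared_b  # both sides derive the same SHA-256 digest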
| 696 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = 1_0
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = [1, 2, 3, 4]
lowerCamelCase__ : Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase, self.block_size, 0 ), _UpperCAmelCase )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
lowerCamelCase__ : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase, self.block_size, 0 ), _UpperCAmelCase )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
lowerCamelCase__ : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase, self.block_size, 0 ), _UpperCAmelCase )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
lowerCamelCase__ , lowerCamelCase__ : str = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, [] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ''
lowerCamelCase__ , lowerCamelCase__ : Dict = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, [] )
self.assertEqual(_UpperCAmelCase, [] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
lowerCamelCase__ , lowerCamelCase__ : int = process_story(_UpperCAmelCase )
lowerCamelCase__ : str = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = ['It was the best of times.']
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = torch.tensor([1, 2, 3, 4] )
lowerCamelCase__ : List[str] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase, 0 ).numpy(), expected.numpy() )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
lowerCamelCase__ : Any = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase, 2_3 ).numpy(), expected.numpy() )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
lowerCamelCase__ : Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase, 1 ).numpy(), expected.numpy() )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = 1_0_1
lowerCamelCase__ : int = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
lowerCamelCase__ : List[str] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
lowerCamelCase__ : int = compute_token_type_ids(_UpperCAmelCase, _UpperCAmelCase )
np.testing.assert_array_equal(_UpperCAmelCase, _UpperCAmelCase )
| 714 |
"""simple docstring"""
def lowerCamelCase_ ( mass , velocity ):
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
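# Worked example: a 10 kg mass moving at -5 m/s carries
#   0.5 * 10 * |-5| * |-5| = 125.0 J
# so the sign of the velocity does not affect the result.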
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( weights , values , number_of_items , max_weight , index ):
    if index == number_of_items:
        return 0
    ans_with_item = 0
    ans_without_item = lowerCamelCase_(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans_with_item = values[index] + lowerCamelCase_(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans_without_item , ans_with_item )
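# Worked example of the recursion above:
#   weights = [1, 3, 4], values = [15, 20, 30], max_weight = 4
#   taking items 0 and 1 fills the capacity exactly -> 15 + 20 = 35,
#   which beats taking item 2 alone (30), so the answer is 35.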
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 696 | 0 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class a_ ( a__ ):
'''simple docstring'''
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(*_A, **_A )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_A )
lowerCamelCase__ : Optional[int] = self.values[key]
def a__ (self ):
'''simple docstring'''
return (
sum(self.charge_factor - len(_A ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def a__ (self, lowerCamelCase_, lowerCamelCase_=None ):
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_A ) == 0
):
return key
return super()._collision_resolution(_A, _A )
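# Illustrative sketch (hypothetical, not part of the original module): the class
# above implements separate chaining. Each bucket keeps a deque, new values are
# pushed to the left, and the parent collision resolution only runs once a
# bucket already holds `charge_factor` entries. The same chaining idea with a
# plain dict:
#
#     from collections import deque
#     buckets = {}
#     buckets.setdefault(3, deque()).appendleft("value")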
| 716 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
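# Note: assuming this mirrors diffusers' ValueGuidedRLPipeline.reset_x0, each
# conditioned timestep overwrites the state channels of the trajectory, roughly
# x_in[:, key, self.action_dim:] = val.clone(), so every denoising step is
# re-anchored to the known initial observation.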
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
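# Hypothetical usage sketch (argument names are illustrative): the __call__
# above normalizes the current observation, tiles it batch_size times, denoises
# candidate trajectories under value-function guidance, and returns the
# de-normalized first action of the highest-value trajectory:
#
#     obs = env.reset()
#     action = pipeline(obs, batch_size=64, planning_horizon=32)
#     obs, reward, terminated, info = env.step(action)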
| 696 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
A_ : Tuple = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
A_ : Optional[Any] = "\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
A_ : Dict = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ), codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'], reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'], )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
lowerCamelCase__ : str = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
lowerCamelCase__ : List[Any] = evaluate(dataset=__lowerCamelCase, predictions=__lowerCamelCase )
return score
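# Note: `_compute` above repacks the flat reference dicts into the nested
# {"paragraphs": [{"qas": [...]}]} layout expected by the official SQuAD v1
# `evaluate` script, while predictions are passed as a plain {id: text} mapping.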
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
# total number of single characters: the denominator for their probabilities.
lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
# first-order entropy accumulator (one-character strings)
lowerCamelCase__ : str = 0
# for each letter of the alphabet that occurs in the text, accumulate its entropy term
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
lowerCamelCase__ : Union[str, Any] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowerCamelCase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# total count of two-character strings
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
# for each two-character sequence over the alphabet, accumulate its entropy term.
for cha in my_alphas:
for chb in my_alphas:
lowerCamelCase__ : int = cha + chb
if sequence in two_char_strings:
lowerCamelCase__ : int = two_char_strings[sequence]
lowerCamelCase__ : Tuple = int(_lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(_lowerCamelCase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
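# Minimal sketch of the entropy formula used above (hypothetical helper, added
# for illustration): H = -sum(p * log2(p)) over symbol probabilities. For "aab"
# (p = 2/3 and 1/3) this gives about 0.918 bits per symbol. The difference
# printed by the caller, H(pairs) - H(singles), estimates the conditional
# entropy of the next character given the current one.
def _entropy_sketch(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())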
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a_ ( _UpperCAmelCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (DPMSolverSinglestepScheduler,)
lowerCamelCase__ : int = (('num_inference_steps', 25),)
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('inf' ),
'''variance_type''': None,
}
config.update(**lowerCamelCase_ )
return config
def a__ (self, lowerCamelCase_=0, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = dict(self.forward_default_kwargs )
lowerCamelCase__ : List[str] = kwargs.pop('num_inference_steps', lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = self.dummy_sample
lowerCamelCase__ : Optional[Any] = 0.1 * sample
lowerCamelCase__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : int = self.get_scheduler_config(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
lowerCamelCase__ : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = scheduler_class.from_pretrained(lowerCamelCase_ )
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
lowerCamelCase__ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase__ : Dict = sample, sample
for t in range(lowerCamelCase_, time_step + scheduler.config.solver_order + 1 ):
lowerCamelCase__ : List[Any] = scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = new_scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
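# The pattern above (run solver_order steps, save_config()/from_pretrained(),
# replay the same residual history, compare prev_sample) verifies that a
# serialized scheduler config reproduces the multistep trajectory to within 1e-5.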
def a__ (self ):
'''simple docstring'''
pass
def a__ (self, lowerCamelCase_=0, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = dict(self.forward_default_kwargs )
lowerCamelCase__ : List[str] = kwargs.pop('num_inference_steps', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = self.dummy_sample
lowerCamelCase__ : List[str] = 0.1 * sample
lowerCamelCase__ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : str = self.get_scheduler_config()
lowerCamelCase__ : Tuple = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCamelCase__ : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = scheduler_class.from_pretrained(lowerCamelCase_ )
# set timesteps on the reloaded scheduler
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
lowerCamelCase__ : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase__ : Any = scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : str = new_scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ (self, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
if scheduler is None:
lowerCamelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCamelCase__ : Dict = self.get_scheduler_config(**lowerCamelCase_ )
lowerCamelCase__ : Any = scheduler_class(**lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.scheduler_classes[0]
lowerCamelCase__ : Tuple = self.get_scheduler_config(**lowerCamelCase_ )
lowerCamelCase__ : Dict = scheduler_class(**lowerCamelCase_ )
lowerCamelCase__ : str = 1_0
lowerCamelCase__ : int = self.dummy_model()
lowerCamelCase__ : int = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ : List[str] = model(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
return sample
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCamelCase__ : Optional[int] = 5_0
lowerCamelCase__ : Optional[int] = self.dummy_model()
lowerCamelCase__ : Dict = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase_ )
# make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
lowerCamelCase__ : Union[str, Any] = model(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[Any] = scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_574 ) < 1e-3
def a__ (self ):
'''simple docstring'''
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCamelCase__ : Optional[int] = self.full_loop(scheduler=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
lowerCamelCase__ : str = DEISMultistepScheduler.from_config(scheduler.config )
lowerCamelCase__ : Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCamelCase__ : Optional[int] = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCamelCase__ : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCamelCase__ : Union[str, Any] = self.full_loop(scheduler=lowerCamelCase_ )
lowerCamelCase__ : List[str] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def a__ (self ):
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase_ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase_, prediction_type=lowerCamelCase_, sample_max_value=lowerCamelCase_, algorithm_type='dpmsolver++', solver_order=lowerCamelCase_, solver_type=lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase_, solver_type=lowerCamelCase_, prediction_type=lowerCamelCase_, algorithm_type=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = self.full_loop(
solver_order=lowerCamelCase_, solver_type=lowerCamelCase_, prediction_type=lowerCamelCase_, algorithm_type=lowerCamelCase_, )
assert not torch.isnan(lowerCamelCase_ ).any(), "Samples have nan numbers"
def a__ (self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowerCamelCase_ )
self.check_over_configs(lower_order_final=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def a__ (self ):
'''simple docstring'''
self.check_over_configs(variance_type=lowerCamelCase_ )
self.check_over_configs(variance_type='learned_range' )
def a__ (self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=lowerCamelCase_, time_step=0 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.full_loop()
lowerCamelCase__ : int = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.full_loop(use_karras_sigmas=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_248 ) < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.full_loop(prediction_type='v_prediction' )
lowerCamelCase__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.1_453 ) < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.full_loop(prediction_type='v_prediction', use_karras_sigmas=lowerCamelCase_ )
lowerCamelCase__ : str = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.0_649 ) < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.scheduler_classes[0]
lowerCamelCase__ : Optional[Any] = self.get_scheduler_config(thresholding=lowerCamelCase_, dynamic_thresholding_ratio=0 )
lowerCamelCase__ : Optional[int] = scheduler_class(**lowerCamelCase_ )
lowerCamelCase__ : Tuple = 1_0
lowerCamelCase__ : Any = self.dummy_model()
lowerCamelCase__ : Tuple = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ : Tuple = model(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Any = scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
assert sample.dtype == torch.floataa
| 718 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
name_score += ord(_lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
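# Worked example (from the Project Euler 22 statement, not recomputed here):
# once the list is sorted, COLIN is the 938th name with alphabetical value
# 3 + 15 + 12 + 9 + 14 = 53, contributing 938 * 53 = 49714 to the total.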
if __name__ == "__main__":
print(solution())
| 696 | 0 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
A_ : int = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Dict = test_results.split(' ' )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Optional[Any] = 0
# When the output is short enough, it is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase__ : str = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_A ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
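# Example (hypothetical pytest summary line): splitting
# "== 2 failed, 98 passed in 3.21s ==" on spaces, the token before "failed,"
# yields failed == 2, the token before "passed" yields success == 98, and
# time_spent == "3.21s" (the second-to-last token, since the line ends in "==").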
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = False
for line in failures_short_lines.split('\n' ):
if re.search(r'_ \[doctest\]' , _A ):
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Optional[int] = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
lowerCamelCase__ : List[str] = line
lowerCamelCase__ : List[Any] = False
return failures
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = title
lowerCamelCase__ : Dict = doc_test_results['time_spent'].split(',' )[0]
lowerCamelCase__ : List[Any] = doc_test_results['success']
lowerCamelCase__ : Optional[int] = doc_test_results['failures']
lowerCamelCase__ : Union[str, Any] = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase__ : str = doc_test_results
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [self._time_spent]
lowerCamelCase__ : Dict = 0
for time in time_spent:
lowerCamelCase__ : Union[str, Any] = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__UpperCamelCase ) == 1:
lowerCamelCase__ : int = [0, 0, time_parts[0]]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_6_0_0 + minutes * 6_0 + seconds
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = total_secs // 3_6_0_0, (total_secs % 3_6_0_0) // 6_0, total_secs % 6_0
return f'''{int(__UpperCamelCase )}h{int(__UpperCamelCase )}m{int(__UpperCamelCase )}s'''
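# e.g. a stored "1:02:03" parses to 3723 total seconds and renders as "1h2m3s";
# a bare sub-minute value such as "3.21" is padded to [0, 0, "3.21"] first.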
@property
def a__ (self ):
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def a__ (self ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def a__ (self ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = 4_0
lowerCamelCase__ : List[str] = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(__UpperCamelCase, __UpperCamelCase )}
lowerCamelCase__ : Dict = ''
for category, failures in category_failures.items():
if len(__UpperCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__UpperCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__UpperCamelCase )
@staticmethod
def a__ ():
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(__UpperCamelCase )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text='There was an issue running the tests.', blocks=__UpperCamelCase, )
def a__ (self ):
'''simple docstring'''
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowerCamelCase__ : List[str] = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else 'All tests passed.'
lowerCamelCase__ : Union[str, Any] = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], blocks=self.payload, text=__UpperCamelCase, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ''
for key, value in failures.items():
lowerCamelCase__ : Optional[Any] = value[:2_0_0] + ' [Truncated]' if len(__UpperCamelCase ) > 2_5_0 else value
failures_text += f'''*{key}*\n_{value}_\n\n'''
lowerCamelCase__ : Dict = job_name
lowerCamelCase__ : Optional[int] = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
lowerCamelCase__ : str = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def a__ (self ):
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
lowerCamelCase__ : Optional[int] = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
lowerCamelCase__ : Optional[Any] = sorted(self.doc_test_results.items(), key=lambda lowerCamelCase_ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
lowerCamelCase__ : Union[str, Any] = f'''*Num failures* :{len(job_result['failed'] )} \n'''
lowerCamelCase__ : Optional[int] = job_result['failures']
lowerCamelCase__ : Any = self.get_reply_blocks(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, text=__UpperCamelCase )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text=f'''Results for {job}''', blocks=__UpperCamelCase, thread_ts=self.thread_ts['ts'], )
time.sleep(1 )
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[Any] = os.environ['GITHUB_RUN_ID']
lowerCamelCase__ : Dict = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
lowerCamelCase__ : List[str] = requests.get(_A ).json()
lowerCamelCase__ : Optional[Any] = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
lowerCamelCase__ : Tuple = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_A ):
lowerCamelCase__ : List[Any] = requests.get(url + f'''&page={i + 2}''' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , e )
return {}
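# Pagination note: the jobs API returns at most 100 entries per call, so for
# e.g. total_count == 250 the loop above fetches math.ceil((250 - 100) / 100)
# == 2 extra pages via "&page=2" and "&page=3" before merging the name -> URL map.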
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Dict = {}
if os.path.exists(_A ):
lowerCamelCase__ : int = os.listdir(_A )
for file in files:
try:
with open(os.path.join(_A , _A ) , encoding='utf-8' ) as f:
lowerCamelCase__ : Any = f.read()
except UnicodeDecodeError as e:
raise ValueError(f'''Could not open {os.path.join(_A , _A )}.''' ) from e
return _artifact
def lowerCamelCase_ ( ):
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = name
lowerCamelCase__ : Tuple = []
def __str__(self ):
'''simple docstring'''
return self.name
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
self.paths.append({'name': self.name, 'path': path} )
lowerCamelCase__ : int = {}
lowerCamelCase__ : Dict = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase__ : List[Any] = directory
if artifact_name not in _available_artifacts:
lowerCamelCase__ : int = Artifact(_A )
_available_artifacts[artifact_name].add_path(_A )
return _available_artifacts
if __name__ == "__main__":
A_ : Dict = get_job_links()
A_ : Dict = retrieve_available_artifacts()
A_ : Optional[int] = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
A_ : Dict = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
A_ : int = github_actions_job_links.get("run_doctests")
A_ : Tuple = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
A_ : Optional[Any] = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
A_ : str = handle_test_results(artifact["stats"])
A_ : Tuple = failed
A_ : int = success
A_ : Any = time_spent[1:-1] + ''', '''
A_ : Dict = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
A_ : List[Any] = line.replace("FAILED ", "")
A_ : Tuple = line.split()[0].replace("\n", "")
if "::" in line:
A_ : Union[str, Any] = line.split("::")
else:
A_ : Optional[Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
A_ : List[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
A_ : Optional[Any] = all_failures[test] if test in all_failures else '''N/A'''
A_ : List[str] = failure
break
A_ : List[Any] = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 719 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
| 696 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowerCamelCase__ : str = 128
elif "12-12" in model_name:
lowerCamelCase__ : Optional[int] = 12
lowerCamelCase__ : int = 12
elif "14-14" in model_name:
lowerCamelCase__ : Tuple = 14
lowerCamelCase__ : Any = 14
elif "16-16" in model_name:
lowerCamelCase__ : Optional[int] = 16
lowerCamelCase__ : List[Any] = 16
else:
raise ValueError('Model not supported' )
lowerCamelCase__ : int = 'huggingface/label-files'
if "speech-commands" in model_name:
lowerCamelCase__ : Optional[int] = 35
lowerCamelCase__ : Tuple = 'speech-commands-v2-id2label.json'
else:
lowerCamelCase__ : List[Any] = 527
lowerCamelCase__ : int = 'audioset-id2label.json'
lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Optional[int] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Dict = idalabel
lowerCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( _lowerCamelCase ):
if "module.v" in name:
lowerCamelCase__ : List[Any] = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
lowerCamelCase__ : List[Any] = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
lowerCamelCase__ : List[str] = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
lowerCamelCase__ : Dict = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowerCamelCase__ : List[str] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
lowerCamelCase__ : List[str] = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowerCamelCase__ : str = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCamelCase__ : Optional[int] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCamelCase__ : Any = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCamelCase__ : Optional[int] = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowerCamelCase__ : Optional[Any] = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
lowerCamelCase__ : int = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
lowerCamelCase__ : Dict = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
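# Renaming example: "module.v.blocks.0.attn.proj.weight" passes through the
# rules above and becomes
# "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight".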
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : List[Any] = orig_state_dict.pop(_lowerCamelCase )
if "qkv" in key:
lowerCamelCase__ : Union[str, Any] = key.split('.' )
lowerCamelCase__ : Union[str, Any] = int(key_split[3] )
lowerCamelCase__ : List[str] = config.hidden_size
if "weight" in key:
lowerCamelCase__ : Tuple = val[:dim, :]
lowerCamelCase__ : Any = val[dim : dim * 2, :]
lowerCamelCase__ : Optional[int] = val[-dim:, :]
else:
lowerCamelCase__ : int = val[:dim]
lowerCamelCase__ : Optional[Any] = val[dim : dim * 2]
lowerCamelCase__ : str = val[-dim:]
else:
lowerCamelCase__ : Dict = val
return orig_state_dict
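# Splitting note: each fused "qkv" projection is sliced along dim 0 into three
# equal hidden_size-wide chunks (query val[:dim, :], key val[dim : dim * 2, :],
# value val[-dim:, :]) to match the separate q/k/v weights of the HF model.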
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
lowerCamelCase__ : Any = get_audio_spectrogram_transformer_config(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
lowerCamelCase__ : Union[str, Any] = model_name_to_url[model_name]
lowerCamelCase__ : int = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location='cpu' )
# remove some keys
remove_keys(_lowerCamelCase )
# rename some keys
lowerCamelCase__ : Union[str, Any] = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
# load 🤗 model
lowerCamelCase__ : List[str] = ASTForAudioClassification(_lowerCamelCase )
model.eval()
model.load_state_dict(_lowerCamelCase )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowerCamelCase__ : Union[str, Any] = -4.2_677_393 if 'speech-commands' not in model_name else -6.845_978
lowerCamelCase__ : Optional[Any] = 4.5_689_974 if 'speech-commands' not in model_name else 5.5_654_526
lowerCamelCase__ : Dict = 1024 if 'speech-commands' not in model_name else 128
lowerCamelCase__ : List[str] = ASTFeatureExtractor(mean=_lowerCamelCase , std=_lowerCamelCase , max_length=_lowerCamelCase )
if "speech-commands" in model_name:
lowerCamelCase__ : Union[str, Any] = load_dataset('speech_commands' , 'v0.02' , split='validation' )
lowerCamelCase__ : Optional[int] = dataset[0]['audio']['array']
else:
lowerCamelCase__ : Any = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
lowerCamelCase__ , lowerCamelCase__ : List[str] = torchaudio.load(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = waveform.squeeze().numpy()
lowerCamelCase__ : Optional[Any] = feature_extractor(_lowerCamelCase , sampling_rate=1_6000 , return_tensors='pt' )
# forward pass
lowerCamelCase__ : Any = model(**_lowerCamelCase )
lowerCamelCase__ : List[str] = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowerCamelCase__ : str = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowerCamelCase__ : int = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowerCamelCase__ : List[str] = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowerCamelCase__ : Dict = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowerCamelCase__ : Optional[int] = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowerCamelCase__ : Tuple = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowerCamelCase__ : Optional[int] = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowerCamelCase__ : Union[str, Any] = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1e-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A_ : str = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 720 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
        # MobileBERT outputs span roughly 10e0 to 10e8. Even a 0.0000001% difference on a value of 10e8 shows up
        # as an absolute difference of ~1, so an additive (absolute) tolerance is not a good fit here.
        # Instead, we divide the expected result by the actual result, which should give ~1, and check that the
        # ratio stays within bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
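        # e.g. an expected value of 1.0e8 against an output of 1.0e8 + 5e4 gives a ratio of ~0.9995,
        # comfortably inside the band for TOLERANCE = 1e-3 (hypothetical numbers, for illustration)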
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 696 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
A_ : Tuple = logging.get_logger(__name__)
A_ : int = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
A_ : List[str] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
A_ : Optional[Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class a_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'whisper'
lowerCamelCase__ : int = ['past_key_values']
lowerCamelCase__ : int = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self, lowerCamelCase_=5_1_8_6_5, lowerCamelCase_=8_0, lowerCamelCase_=6, lowerCamelCase_=4, lowerCamelCase_=6, lowerCamelCase_=4, lowerCamelCase_=1_5_3_6, lowerCamelCase_=1_5_3_6, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=5_0_2_5_7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_="gelu", lowerCamelCase_=2_5_6, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.02, lowerCamelCase_=False, lowerCamelCase_=1_5_0_0, lowerCamelCase_=4_4_8, lowerCamelCase_=5_0_2_5_6, lowerCamelCase_=5_0_2_5_6, lowerCamelCase_=5_0_2_5_6, lowerCamelCase_=None, lowerCamelCase_=[2_2_0, 5_0_2_5_6], lowerCamelCase_=False, lowerCamelCase_=2_5_6, lowerCamelCase_=False, lowerCamelCase_=0.05, lowerCamelCase_=1_0, lowerCamelCase_=2, lowerCamelCase_=0.0, lowerCamelCase_=1_0, lowerCamelCase_=0, lowerCamelCase_=7, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : Dict = num_mel_bins
lowerCamelCase__ : Dict = d_model
lowerCamelCase__ : List[Any] = encoder_layers
lowerCamelCase__ : Dict = encoder_attention_heads
lowerCamelCase__ : Any = decoder_layers
lowerCamelCase__ : Union[str, Any] = decoder_attention_heads
lowerCamelCase__ : str = decoder_ffn_dim
lowerCamelCase__ : Union[str, Any] = encoder_ffn_dim
lowerCamelCase__ : Dict = dropout
lowerCamelCase__ : Any = attention_dropout
lowerCamelCase__ : Dict = activation_dropout
lowerCamelCase__ : str = activation_function
lowerCamelCase__ : Tuple = init_std
lowerCamelCase__ : Optional[int] = encoder_layerdrop
lowerCamelCase__ : Any = decoder_layerdrop
lowerCamelCase__ : List[Any] = use_cache
lowerCamelCase__ : int = encoder_layers
lowerCamelCase__ : str = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase__ : str = max_source_positions
lowerCamelCase__ : int = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
lowerCamelCase__ : str = classifier_proj_size
lowerCamelCase__ : int = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__ : List[str] = apply_spec_augment
lowerCamelCase__ : Optional[int] = mask_time_prob
lowerCamelCase__ : Any = mask_time_length
lowerCamelCase__ : Tuple = mask_time_min_masks
lowerCamelCase__ : Tuple = mask_feature_prob
lowerCamelCase__ : List[Any] = mask_feature_length
lowerCamelCase__ : Union[str, Any] = mask_feature_min_masks
lowerCamelCase__ : Dict = median_filter_width
super().__init__(
pad_token_id=_lowercase, bos_token_id=_lowercase, eos_token_id=_lowercase, is_encoder_decoder=_lowercase, decoder_start_token_id=_lowercase, suppress_tokens=_lowercase, begin_suppress_tokens=_lowercase, **_lowercase, )
class a_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
            lowerCamelCase__ : List[str] = {0: 'batch'}
        else:
            lowerCamelCase__ : str = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_lowercase, direction='inputs' )
return common_inputs
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, lowerCamelCase_ = 2_2_0_5_0, lowerCamelCase_ = 5.0, lowerCamelCase_ = 2_2_0, ):
'''simple docstring'''
lowerCamelCase__ : Dict = OrderedDict()
lowerCamelCase__ : Optional[Any] = OnnxConfig.generate_dummy_inputs(
self, preprocessor=preprocessor.feature_extractor, batch_size=_lowercase, framework=_lowercase, sampling_rate=_lowercase, time_duration=_lowercase, frequency=_lowercase, )
        lowerCamelCase__ : Any = encoder_inputs['input_features'].shape[2]
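        # Whisper's encoder downsamples time by 2 (stride-2 convolution), hence the // 2 when past key values are used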
lowerCamelCase__ : Optional[int] = encoder_sequence_length // 2 if self.use_past else seq_length
lowerCamelCase__ : List[str] = super().generate_dummy_inputs(
preprocessor.tokenizer, _lowercase, _lowercase, _lowercase, _lowercase )
lowerCamelCase__ : Optional[int] = encoder_inputs.pop('input_features' )
lowerCamelCase__ : str = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
lowerCamelCase__ : List[Any] = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def a__ (self ):
'''simple docstring'''
return 1e-3
| 721 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented-out code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
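# Usage sketch (hypothetical prompt length): generation stops once every sequence in the
# batch contains one of EOF_STRINGS past the prompt, e.g.
#   criteria = StoppingCriteriaList([EndOfFunctionCriteria(prompt_len, EOF_STRINGS, tokenizer)])
#   model.generate(input_ids, stopping_criteria=criteria)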
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
            lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
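# Hedged reference sketch (not the metric's actual implementation): the `code_eval`
# metric used below reports pass@k via the unbiased estimator from the Codex paper,
# with n generations per task of which c pass, sampling k.
from math import comb

def _pass_at_k_reference(n: int, c: int, k: int) -> float:
    """Unbiased pass@k estimate: 1 - C(n - c, k) / C(n, k)."""
    if n - c < k:
        return 1.0  # fewer than k failing samples, so every size-k draw contains a pass
    return 1.0 - comb(n - c, k) / comb(n, k)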
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
    # note: args.batch_size is really num_return_sequences per generate call; the DataLoader batch size is 1
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
    lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
        lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _lowercase ):
'''simple docstring'''
lowerCamelCase__ : Any = (DDPMParallelScheduler,)
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**A_ )
return config
def a__ (self ):
'''simple docstring'''
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A_ )
def a__ (self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A_, beta_end=A_ )
def a__ (self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def a__ (self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A_ )
def a__ (self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def a__ (self ):
'''simple docstring'''
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A_, prediction_type=A_, sample_max_value=A_, )
def a__ (self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def a__ (self ):
'''simple docstring'''
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=A_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase__ : Optional[int] = self.get_scheduler_config()
lowerCamelCase__ : Union[str, Any] = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.scheduler_classes[0]
lowerCamelCase__ : str = self.get_scheduler_config()
lowerCamelCase__ : List[str] = scheduler_class(**A_ )
lowerCamelCase__ : List[Any] = len(A_ )
lowerCamelCase__ : Dict = self.dummy_model()
lowerCamelCase__ : Tuple = self.dummy_sample_deter
lowerCamelCase__ : List[str] = self.dummy_sample_deter + 0.1
lowerCamelCase__ : Optional[Any] = self.dummy_sample_deter - 0.1
lowerCamelCase__ : Dict = samplea.shape[0]
lowerCamelCase__ : Optional[int] = torch.stack([samplea, samplea, samplea], dim=0 )
lowerCamelCase__ : Dict = torch.arange(A_ )[0:3, None].repeat(1, A_ )
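        # flatten the (3, batch_size) grids so every sample/timestep pair runs through the model in one batched call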
lowerCamelCase__ : int = model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) )
lowerCamelCase__ : int = scheduler.batch_step_no_noise(A_, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ) )
lowerCamelCase__ : Tuple = torch.sum(torch.abs(A_ ) )
lowerCamelCase__ : Optional[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_005 ) < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase__ : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase__ : str = scheduler_class(**A_ )
lowerCamelCase__ : Optional[int] = len(A_ )
lowerCamelCase__ : Optional[Any] = self.dummy_model()
lowerCamelCase__ : Any = self.dummy_sample_deter
lowerCamelCase__ : List[Any] = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
lowerCamelCase__ : List[str] = model(A_, A_ )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ : Optional[int] = scheduler.step(A_, A_, A_, generator=A_ ).prev_sample
lowerCamelCase__ : List[str] = pred_prev_sample
lowerCamelCase__ : Any = torch.sum(torch.abs(A_ ) )
lowerCamelCase__ : int = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCamelCase__ : int = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCamelCase__ : Union[str, Any] = scheduler_class(**A_ )
lowerCamelCase__ : Optional[int] = len(A_ )
lowerCamelCase__ : List[Any] = self.dummy_model()
lowerCamelCase__ : Union[str, Any] = self.dummy_sample_deter
lowerCamelCase__ : Optional[int] = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
lowerCamelCase__ : List[str] = model(A_, A_ )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ : Union[str, Any] = scheduler.step(A_, A_, A_, generator=A_ ).prev_sample
lowerCamelCase__ : Dict = pred_prev_sample
lowerCamelCase__ : str = torch.sum(torch.abs(A_ ) )
lowerCamelCase__ : List[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.scheduler_classes[0]
lowerCamelCase__ : str = self.get_scheduler_config()
lowerCamelCase__ : Union[str, Any] = scheduler_class(**A_ )
lowerCamelCase__ : List[Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=A_ )
lowerCamelCase__ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(A_ ):
if i == len(A_ ) - 1:
lowerCamelCase__ : int = -1
else:
lowerCamelCase__ : Any = timesteps[i + 1]
lowerCamelCase__ : Tuple = scheduler.previous_timestep(A_ )
lowerCamelCase__ : Union[str, Any] = prev_t.item()
self.assertEqual(A_, A_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.scheduler_classes[0]
lowerCamelCase__ : List[Any] = self.get_scheduler_config()
lowerCamelCase__ : str = scheduler_class(**A_ )
lowerCamelCase__ : List[str] = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(A_, msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=A_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.scheduler_classes[0]
lowerCamelCase__ : int = self.get_scheduler_config()
lowerCamelCase__ : Any = scheduler_class(**A_ )
lowerCamelCase__ : Optional[Any] = [1_0_0, 8_7, 5_0, 1, 0]
lowerCamelCase__ : List[Any] = len(A_ )
with self.assertRaises(A_, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=A_, timesteps=A_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.scheduler_classes[0]
lowerCamelCase__ : List[Any] = self.get_scheduler_config()
lowerCamelCase__ : Optional[int] = scheduler_class(**A_ )
lowerCamelCase__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            A_, msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}', ):
scheduler.set_timesteps(timesteps=A_ )
| 700 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
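# These placeholders let imports succeed without the optional "speech" dependencies;
# `requires_backends` raises an informative ImportError as soon as they are instantiated.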
| 696 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 701 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
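# equivalent one-liner for non-negative inputs: sum(int(digit) for digit in str(number))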
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Tuple = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
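# Keys whose mapped value contains "*" are templates: the layer index parsed from the
# fairseq parameter name is substituted for "*" during weight loading below.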
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
for attribute in key.split('.' ):
lowerCamelCase__ : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
lowerCamelCase__ : List[Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
lowerCamelCase__ : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowerCamelCase__ : Tuple = value
elif weight_type == "weight_g":
lowerCamelCase__ : List[Any] = value
elif weight_type == "weight_v":
lowerCamelCase__ : Any = value
elif weight_type == "bias":
lowerCamelCase__ : Dict = value
else:
lowerCamelCase__ : Tuple = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Tuple = fairseq_model.state_dict()
lowerCamelCase__ : Any = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__ : str = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == 'group' , )
lowerCamelCase__ : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase__ : Union[str, Any] = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCamelCase__ : Tuple = True
if "*" in mapped_key:
lowerCamelCase__ : Optional[Any] = name.split(_lowerCamelCase )[0].split('.' )[-2]
lowerCamelCase__ : List[Any] = mapped_key.replace('*' , _lowerCamelCase )
if "weight_g" in name:
lowerCamelCase__ : Tuple = 'weight_g'
elif "weight_v" in name:
lowerCamelCase__ : str = 'weight_v'
elif "weight" in name:
lowerCamelCase__ : Optional[int] = 'weight'
elif "bias" in name:
lowerCamelCase__ : Tuple = 'bias'
else:
lowerCamelCase__ : str = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Tuple = full_name.split('conv_layers.' )[-1]
lowerCamelCase__ : List[Any] = name.split('.' )
lowerCamelCase__ : Dict = int(items[0] )
lowerCamelCase__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowerCamelCase__ : Dict = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowerCamelCase__ : List[str] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowerCamelCase__ : Optional[int] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowerCamelCase__ : Optional[int] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Dict = SEWConfig()
if is_finetuned:
lowerCamelCase__ : str = model.wav_encoder.wav_model.cfg
else:
lowerCamelCase__ : Optional[int] = model.cfg
lowerCamelCase__ : Tuple = fs_config.conv_bias
lowerCamelCase__ : Tuple = eval(fs_config.conv_feature_layers )
lowerCamelCase__ : Union[str, Any] = [x[0] for x in conv_layers]
lowerCamelCase__ : str = [x[1] for x in conv_layers]
lowerCamelCase__ : Optional[int] = [x[2] for x in conv_layers]
lowerCamelCase__ : Union[str, Any] = 'gelu'
lowerCamelCase__ : Tuple = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
lowerCamelCase__ : List[Any] = 0.0
lowerCamelCase__ : Dict = fs_config.activation_fn.name
lowerCamelCase__ : Optional[int] = fs_config.encoder_embed_dim
lowerCamelCase__ : Dict = 0.02
lowerCamelCase__ : int = fs_config.encoder_ffn_embed_dim
lowerCamelCase__ : Optional[int] = 1e-5
lowerCamelCase__ : Tuple = fs_config.encoder_layerdrop
lowerCamelCase__ : Any = fs_config.encoder_attention_heads
lowerCamelCase__ : Optional[Any] = fs_config.conv_pos_groups
lowerCamelCase__ : List[str] = fs_config.conv_pos
lowerCamelCase__ : Tuple = len(_lowerCamelCase )
lowerCamelCase__ : Any = fs_config.encoder_layers
lowerCamelCase__ : str = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
lowerCamelCase__ : Dict = model.cfg
lowerCamelCase__ : Optional[int] = fs_config.final_dropout
lowerCamelCase__ : Any = fs_config.layerdrop
lowerCamelCase__ : Union[str, Any] = fs_config.activation_dropout
lowerCamelCase__ : Any = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
lowerCamelCase__ : str = fs_config.attention_dropout
lowerCamelCase__ : Tuple = fs_config.dropout_input
lowerCamelCase__ : Optional[int] = fs_config.dropout
lowerCamelCase__ : Tuple = fs_config.mask_channel_length
lowerCamelCase__ : Tuple = fs_config.mask_channel_prob
lowerCamelCase__ : Tuple = fs_config.mask_length
lowerCamelCase__ : List[Any] = fs_config.mask_prob
lowerCamelCase__ : Union[str, Any] = 'Wav2Vec2FeatureExtractor'
lowerCamelCase__ : Optional[Any] = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ):
if is_finetuned:
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
lowerCamelCase__ : Union[str, Any] = SEWConfig.from_pretrained(_lowerCamelCase )
else:
lowerCamelCase__ : Any = convert_config(model[0] , _lowerCamelCase )
lowerCamelCase__ : Optional[int] = model[0].eval()
lowerCamelCase__ : str = True if config.feat_extract_norm == 'layer' else False
lowerCamelCase__ : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
if is_finetuned:
if dict_path:
lowerCamelCase__ : Any = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase__ : Dict = target_dict.pad_index
lowerCamelCase__ : Optional[Any] = target_dict.bos_index
lowerCamelCase__ : str = target_dict.pad_index
lowerCamelCase__ : Tuple = target_dict.bos_index
lowerCamelCase__ : int = target_dict.eos_index
lowerCamelCase__ : List[Any] = len(target_dict.symbols )
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , 'vocab.json' )
if not os.path.isdir(_lowerCamelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , _lowerCamelCase )
lowerCamelCase__ : int = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_lowerCamelCase , )
lowerCamelCase__ : Optional[Any] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
lowerCamelCase__ : str = SEWForCTC(_lowerCamelCase )
else:
lowerCamelCase__ : Union[str, Any] = SEWModel(_lowerCamelCase )
feature_extractor.save_pretrained(_lowerCamelCase )
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
A_ : List[str] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 702 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
A_ : Union[str, Any] = list[list[float | int]]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = len(snake_case_ )
lowerCamelCase__ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(snake_case_ )]
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : float
for row in range(snake_case_ ):
for col in range(snake_case_ ):
lowerCamelCase__ : Union[str, Any] = matrix[row][col]
lowerCamelCase__ : Any = vector[row][0]
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Optional[Any] = 0
while row < size and col < size:
# pivoting
        lowerCamelCase__ : Union[str, Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case_ , snake_case_ ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
lowerCamelCase__ : List[Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , snake_case_ ):
lowerCamelCase__ : Dict = augmented[rowa][col] / augmented[row][col]
lowerCamelCase__ : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , snake_case_ ):
for row in range(snake_case_ ):
lowerCamelCase__ : Any = augmented[row][col] / augmented[col][col]
for cola in range(snake_case_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(snake_case_ )
]
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : int = len(snake_case_ )
lowerCamelCase__ : Matrix = [[0 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
lowerCamelCase__ : Matrix = [[0] for _ in range(snake_case_ )]
lowerCamelCase__ : Matrix
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : int
for x_val, y_val in enumerate(snake_case_ ):
for col in range(snake_case_ ):
lowerCamelCase__ : Tuple = (x_val + 1) ** (size - col - 1)
lowerCamelCase__ : Tuple = y_val
lowerCamelCase__ : Any = solve(snake_case_ , snake_case_ )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(snake_case_ ) )
return interpolated_func
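# solution() below accumulates, for each fitted polynomial, its first incorrect term
# (FIT): the first value at which the fit diverges from the generating function.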
def lowerCamelCase_ ( _lowerCamelCase ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase_ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
lowerCamelCase__ : list[int] = [func(snake_case_ ) for x_val in range(1 , order + 1 )]
lowerCamelCase__ : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
lowerCamelCase__ : int = 0
lowerCamelCase__ : Callable[[int], int]
lowerCamelCase__ : int
for poly in polynomials:
lowerCamelCase__ : Tuple = 1
while func(snake_case_ ) == poly(snake_case_ ):
x_val += 1
ret += poly(snake_case_ )
return ret
if __name__ == "__main__":
print(f"{solution() = }")
| 703 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
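    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)), with s the semi-perimeter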
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or '
            'equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as '
            'length of a side' )
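    # area of a regular n-gon with side length L: n * L**2 / (4 * tan(pi / n))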
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
A_ : Any = "http://www.mocksite.com/file1.txt"
A_ : Dict = "\"text\": [\"foo\", \"foo\"]"
A_ : Optional[Any] = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class a_ :
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = 200
lowerCamelCase__ : List[Any] = {'Content-Length': '100'}
lowerCamelCase__ : Optional[int] = {}
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return [bytes(lowerCamelCase_, 'utf-8' )]
def lowerCamelCase_ ( *_lowerCamelCase , **_lowerCamelCase ):
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
import requests
monkeypatch.setattr(_lowerCamelCase , 'request' , _lowerCamelCase )
lowerCamelCase__ : str = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = {"train": url}
lowerCamelCase__ : Tuple = "dummy"
lowerCamelCase__ : Optional[Any] = "downloads"
lowerCamelCase__ : List[Any] = tmp_path
lowerCamelCase__ : Optional[int] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
lowerCamelCase__ : Union[str, Any] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
lowerCamelCase__ : Tuple = dl_manager.download(_lowerCamelCase )
lowerCamelCase__ : Dict = urls
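    # normalize the three possible input types (str / list / dict) to parallel lists for the checks below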
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : str = [downloaded_paths]
lowerCamelCase__ : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
lowerCamelCase__ : Union[str, Any] = downloaded_paths.values()
lowerCamelCase__ : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCamelCase__ : Dict = Path(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCamelCase__ : Optional[int] = downloaded_path.read_text()
assert content == CONTENT
lowerCamelCase__ : Union[str, Any] = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
lowerCamelCase__ : Optional[Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : str = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[str] = {"train": filename}
lowerCamelCase__ : Tuple = "dummy"
lowerCamelCase__ : int = xz_file.parent
lowerCamelCase__ : List[Any] = "extracted"
lowerCamelCase__ : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
lowerCamelCase__ : Tuple = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
lowerCamelCase__ : Tuple = dl_manager.extract(_lowerCamelCase )
lowerCamelCase__ : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = [extracted_paths]
lowerCamelCase__ : Optional[Any] = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
lowerCamelCase__ : str = extracted_paths.values()
lowerCamelCase__ : Any = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCamelCase__ : Any = Path(_lowerCamelCase )
lowerCamelCase__ : Any = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCamelCase__ : int = extracted_path.read_text()
lowerCamelCase__ : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
assert path.endswith('.jsonl' )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
lowerCamelCase__ : List[str] = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = request.getfixturevalue(_lowerCamelCase )
lowerCamelCase__ : List[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Tuple = request.getfixturevalue(_lowerCamelCase )
lowerCamelCase__ : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 704 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
        # create hypothetical next tokens and extend next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append to next input_ids and attention mask
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
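        # a second input longer than max_position_embeddings exercises the scaled RoPE path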
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
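    # Find the greatest product of four adjacent numbers in the 20x20 grid (right, down, or along either diagonal).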
    with open(os.path.dirname(__file__) + '/grid.txt' ) as f:
lowerCamelCase__ : str = [] # noqa: E741
for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
lowerCamelCase__ : str = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase__ : Optional[int] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase__ : Optional[int] = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase__ : List[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase__ : List[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase__ : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase__ : Optional[Any] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase__ : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase__ : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 705 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
        # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
        lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
    if not isinstance(number , int ):
raise TypeError('Input value must be an \'int\' type' )
lowerCamelCase__ : Dict = 0
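    # shift right until the number is exhausted; the shift count is the 1-based position of the most significant set bit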
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 696 | 0 |
"""simple docstring"""
import torch
from transformers import AutoModel
class a_ ( torch.nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
        super().__init__()
        lowerCamelCase__ : Optional[int] = AutoModel.from_pretrained(lowerCamelCase_, return_dict=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = torch.nn.CosineSimilarity(3, 1e-08 )
lowerCamelCase__ : Any = torch.nn.Softmax(dim=1 )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
        return self.bert(**lowerCamelCase_ ).last_hidden_state
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
        return token_embeddings.sum(2, keepdim=lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=1 ):
'''simple docstring'''
        return self.softmax(T * self.cos(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = W_supports['sizes'].tolist()
lowerCamelCase__ : Tuple = W_supports['start_token_id'].item()
lowerCamelCase__ : List[Any] = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
        lowerCamelCase__ : Optional[Any] = self.BERT(**lowerCamelCase_ )
        lowerCamelCase__ : Any = self.BERT(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : int = None
lowerCamelCase__ : Union[str, Any] = W_supports['input_ids'] == start_token_id
lowerCamelCase__ : Tuple = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(lowerCamelCase_ ):
if i == 0:
lowerCamelCase__ : Dict = 0
else:
lowerCamelCase__ : Union[str, Any] = support_sizes[i - 1]
lowerCamelCase__ : Optional[Any] = S[s : s + size][start_token_masks[s : s + size]]
lowerCamelCase__ : Optional[Any] = S[s : s + size][end_token_masks[s : s + size]]
lowerCamelCase__ : Union[str, Any] = torch.matmul(q[i], s_start.T ).sum(1 ).softmax(0 )
lowerCamelCase__ : List[Any] = torch.matmul(q[i], s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowerCamelCase__ : Tuple = torch.vstack((p_starts, p_start) )
lowerCamelCase__ : Tuple = torch.vstack((p_ends, p_end) )
else:
lowerCamelCase__ : str = p_start
lowerCamelCase__ : Optional[Any] = p_end
return p_starts, p_ends
| 707 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"User-Agent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return "\n".join(
f'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 708 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        lowerCamelCase__ : Any = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 696 | 0 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
A_ : Any = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
try:
with open(_lowercase , 'rb' ) as flax_state_f:
lowerCamelCase__ : Union[str, Any] = from_bytes(_lowercase , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(_lowercase ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f'''Unable to convert {model_file} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(_lowercase , _lowercase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    lowerCamelCase__ : str = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , _lowercase ) ).values()
if any(_lowercase ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
lowerCamelCase__ : Optional[Any] = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _lowercase )
lowerCamelCase__ : str = ""
lowerCamelCase__ : Union[str, Any] = flatten_dict(_lowercase , sep='.' )
lowerCamelCase__ : Optional[int] = pt_model.state_dict()
# keep track of unexpected & missing keys
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase__ : List[Any] = flax_key_tuple.split('.' )
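        # Flax conv kernels are stored as (H, W, in_channels, out_channels); PyTorch expects (out, in, H, W)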
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowerCamelCase__ : Union[str, Any] = flax_key_tuple_array[:-1] + ["weight"]
lowerCamelCase__ : int = jnp.transpose(_lowercase , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
lowerCamelCase__ : List[Any] = flax_key_tuple_array[:-1] + ["weight"]
lowerCamelCase__ : Optional[int] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowerCamelCase__ : Union[str, Any] = flax_key_tuple_array[:-1] + ["weight"]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(_lowercase ):
lowerCamelCase__ : Dict = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
lowerCamelCase__ : Optional[int] = ".".join(_lowercase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowerCamelCase__ : int = np.asarray(_lowercase ) if not isinstance(_lowercase , np.ndarray ) else flax_tensor
lowerCamelCase__ : Optional[Any] = torch.from_numpy(_lowercase )
# remove from missing keys
missing_keys.remove(_lowercase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_lowercase )
pt_model.load_state_dict(_lowercase )
# re-transform missing_keys to list
lowerCamelCase__ : List[Any] = list(_lowercase )
if len(_lowercase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(_lowercase ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
return pt_model
| 709 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
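        # each up block doubles the temporal resolution, so the sample length must be a multiple of 2**num_blocks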
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
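    # impedance triangle: impedance**2 = resistance**2 + reactance**2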
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
| 696 | 0 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def lowerCamelCase_ ( kwargs , expected ):
    '''simple docstring'''
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def lowerCamelCase_ ( gen_kwargs , max_num_jobs , expected ):
    '''simple docstring'''
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def lowerCamelCase_ ( gen_kwargs , expected ):
    '''simple docstring'''
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
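# For reference, a minimal sketch of the even-split behaviour the tests above
# expect from `_distribute_shards` (a hypothetical re-implementation, NOT the
# `datasets` internals): contiguous ranges whose sizes differ by at most one.
def _distribute_shards_sketch(num_shards, max_num_jobs):
    per_job, remainder = divmod(num_shards, max_num_jobs)
    ranges, start = [], 0
    for job in range(min(num_shards, max_num_jobs)):
        size = per_job + (1 if job < remainder else 0)  # first `remainder` jobs get one extra shard
        ranges.append(range(start, start + size))
        start += size
    return ranges

assert _distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]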
| 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
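# The slice assertions above reduce to an elementwise tolerance check; a
# standalone numpy equivalent with illustrative values:
_slice = np.array([0.550, 0.521, 0.502])
_expected = np.array([0.549, 0.523, 0.501])
assert np.abs(_slice - _expected).max() < 1e-2  # same shape, close values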
| 696 | 0 |
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ : int = 16
A_ : str = 32
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 16 ):
lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Any = DatasetDict(
{
'train': dataset['train'].select(_lowercase ),
'validation': dataset['train'].select(_lowercase ),
'test': dataset['validation'],
} )
def tokenize_function(_lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase__ : int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase__ : Union[str, Any] = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase__ : Tuple = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase__ : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase__ : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
lowerCamelCase__ : str = 8
else:
lowerCamelCase__ : Any = None
return tokenizer.pad(
_lowercase , padding='longest' , max_length=_lowercase , pad_to_multiple_of=_lowercase , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCamelCase__ : Optional[Any] = DataLoader(
tokenized_datasets['train'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
lowerCamelCase__ : int = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
lowerCamelCase__ : str = DataLoader(
tokenized_datasets['test'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader, test_dataloader
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# New Code #
lowerCamelCase__ : Union[str, Any] = []
# Download the dataset
lowerCamelCase__ : Dict = load_dataset('glue' , 'mrpc' )
# Create our splits
lowerCamelCase__ : Union[str, Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
lowerCamelCase__ : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase__ : Optional[Any] = config['''lr''']
lowerCamelCase__ : Optional[int] = int(config['num_epochs'] )
lowerCamelCase__ : Dict = int(config['seed'] )
lowerCamelCase__ : int = int(config['batch_size'] )
lowerCamelCase__ : str = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCamelCase__ : Optional[int] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase__ : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase__ : int = MAX_GPU_BATCH_SIZE
set_seed(_lowercase )
# New Code #
# Create our folds:
lowerCamelCase__ : Dict = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
lowerCamelCase__ : List[Any] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_lowercase ):
lowerCamelCase__ : int = get_fold_dataloaders(
_lowercase , _lowercase , _lowercase , _lowercase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase__ : Tuple = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase__ : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase__ : Any = AdamW(params=model.parameters() , lr=_lowercase )
# Instantiate scheduler
lowerCamelCase__ : int = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=100 , num_training_steps=(len(_lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase__ : Tuple = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Now we train the model
for epoch in range(_lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase__ : Union[str, Any] = model(**_lowercase )
lowerCamelCase__ : List[Any] = outputs.loss
lowerCamelCase__ : str = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(**_lowercase )
lowerCamelCase__ : Tuple = outputs.logits.argmax(dim=-1 )
lowerCamelCase__ : List[Any] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
lowerCamelCase__ : Dict = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _lowercase )
# New Code #
# We also run predictions on the test set at the very end
lowerCamelCase__ : Dict = []
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase__ : Dict = model(**_lowercase )
lowerCamelCase__ : List[str] = outputs.logits
lowerCamelCase__ : Dict = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_lowercase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
lowerCamelCase__ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowerCamelCase__ : int = torch.stack(_lowercase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
lowerCamelCase__ : Optional[Any] = metric.compute(predictions=_lowercase , references=_lowercase )
accelerator.print('Average test metrics from all folds:' , _lowercase )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=_lowercase , default=_lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=_lowercase , default=3 , help='The number of splits to perform across the dataset' )
lowerCamelCase__ : Tuple = parser.parse_args()
lowerCamelCase__ : Optional[int] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
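# Toy illustration (standalone, not wired into the script above) of the
# fold-ensembling step at the end of training: stack per-fold logits,
# average them, and take the argmax of the mean.
_fold_logits = [
    torch.tensor([[2.0, 0.0], [0.0, 1.0]]),  # fold 0
    torch.tensor([[0.0, 1.0], [0.0, 3.0]]),  # fold 1
    torch.tensor([[1.0, 0.0], [2.0, 0.0]]),  # fold 2
]
_mean = torch.stack(_fold_logits, dim=0).sum(dim=0).div(len(_fold_logits))
assert _mean.argmax(dim=-1).tolist() == [0, 1]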
| 712 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
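# Quick illustration of how this table is typically consumed (hypothetical
# helper, not part of the upstream file): map bare package names to their
# pinned requirement strings.
def _pin(names):
    return [A_[name] for name in names]

assert _pin(["torch", "tqdm"]) == ["torch>=1.9,!=1.12.0", "tqdm>=4.27"]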
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
def pigeon_sort ( array ):
    if len(array ) == 0:
        return array
    _min , _max = min(array ), max(array )
    # Compute the variables
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Union[str, Any] = input("Enter numbers separated by comma:\n")
A_ : str = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 713 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
if group not in primes:
raise ValueError('Unsupported Group' )
lowerCamelCase__ : int = primes[group]['prime']
lowerCamelCase__ : Optional[int] = primes[group]['generator']
lowerCamelCase__ : Any = int(hexlify(urandom(3_2 ) ), base=1_6 )
def a__ (self ):
'''simple docstring'''
return hex(self.__private_key )[2:]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = pow(self.generator, self.__private_key, self.prime )
return hex(lowerCamelCase_ )[2:]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase_, (self.prime - 1) // 2, self.prime ) == 1
)
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = int(lowerCamelCase_, base=1_6 )
if not self.is_valid_public_key(lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Tuple = pow(lowerCamelCase_, self.__private_key, self.prime )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase_, (prime - 1) // 2, lowerCamelCase_ ) == 1
)
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
lowerCamelCase__ : Dict = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[Any] = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[str] = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase_, lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Dict = pow(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
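# Toy walkthrough of the exchange the class implements (tiny numbers,
# illustrative only -- NOT secure): both parties derive the same secret.
_p, _g = 23, 5                                  # public prime and generator
_a, _b = 6, 15                                  # private exponents
_A, _B = pow(_g, _a, _p), pow(_g, _b, _p)       # exchanged public keys
assert pow(_B, _a, _p) == pow(_A, _b, _p) == 2  # shared secret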
| 696 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a_ :
'''simple docstring'''
    def __init__(self, a=2, b=3, length=6_4, seed=None ):
        '''simple docstring'''
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.float32 )
    def __len__(self ):
        '''simple docstring'''
        return self.length
    def __getitem__(self, i ):
        '''simple docstring'''
        return {"x": self.x[i], "y": self.y[i]}
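# Standalone peek at the synthetic recipe above (y = a * x + b + noise),
# using the class defaults a=2, b=3: the relation is almost perfectly linear.
_rng = np.random.default_rng(seed=0)
_x = _rng.normal(size=(64,)).astype(np.float32)
_y = 2 * _x + 3 + _rng.normal(scale=0.1, size=(64,)).astype(np.float32)
assert abs(float(np.corrcoef(_x, _y)[0, 1])) > 0.99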
class a_ ( torch.nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_=0, lowerCamelCase_=0, lowerCamelCase_=False ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : int = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCamelCase__ : int = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCamelCase__ : Dict = True
def a__ (self, lowerCamelCase_=None ):
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
lowerCamelCase__ : Union[str, Any] = False
return x * self.a[0] + self.b[0]
class a_ ( torch.nn.Module ):
'''simple docstring'''
def __init__(self, lowerCamelCase_=0, lowerCamelCase_=0, lowerCamelCase_=False ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : List[str] = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCamelCase__ : Optional[int] = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCamelCase__ : List[Any] = True
def a__ (self, lowerCamelCase_=None ):
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
lowerCamelCase__ : Optional[int] = False
return x * self.a + self.b
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
lowerCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[int] = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
lowerCamelCase__ : Any = load_dataset('csv' , data_files=_lowerCamelCase )
lowerCamelCase__ : Any = datasets["""train"""].unique('label' )
lowerCamelCase__ : List[Any] = {v: i for i, v in enumerate(_lowerCamelCase )}
def tokenize_function(_lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase__ : Dict = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding='max_length' )
if "label" in examples:
lowerCamelCase__ : Optional[int] = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase__ : List[Any] = datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(_lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCamelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCamelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowerCamelCase__ : Optional[int] = DataLoader(tokenized_datasets['train'] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=2 )
lowerCamelCase__ : Dict = DataLoader(tokenized_datasets['validation'] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 714 |
"""simple docstring"""
def lowerCamelCase_ ( mass , velocity ):
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
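# Worked example (illustrative values): a 10 kg mass moving at 5 m/s carries
# 0.5 * 10 * 5 * 5 = 125.0 J of kinetic energy.
assert lowerCamelCase_(10, 5) == 125.0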
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Dict = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
A_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
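# Standalone illustration of the `update_from_string` semantics exercised
# above (assumes a regular transformers install; `GPT2Config` appears as
# `GPTaConfig` in the restyled imports at the top of this file).
from transformers import GPT2Config

_c = GPT2Config()
_c.update_from_string("n_embd=769,resid_pdrop=0.2,scale_attn_weights=False")
assert (_c.n_embd, _c.resid_pdrop, _c.scale_attn_weights) == (769, 0.2, False)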
| 696 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
A_ : int = logging.get_logger(__name__)
class a_ ( GLPNImageProcessor ):
    '''simple docstring'''
    def __init__(self, *args, **kwargs ):
        '''simple docstring'''
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs )
| 716 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
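# Minimal sketch (illustrative tensors, independent of the pipeline above) of
# the guidance step run_diffusion performs: take the gradient of a scalar
# "value" w.r.t. the trajectory and ascend it, scaled like the posterior std.
_x = torch.zeros(2, 4, requires_grad=True)
_value = (_x * torch.arange(4.0)).sum()         # stand-in value function
_grad = torch.autograd.grad([_value], [_x])[0]  # d(value)/d(trajectory)
_guided = _x.detach() + 0.1 * _grad             # x <- x + scale * grad
assert _guided.shape == (2, 4)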
| 696 | 0 |
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = tmp_path / 'file.csv'
lowerCamelCase__ : Optional[int] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Any = tmp_path / 'malformed_file.csv'
lowerCamelCase__ : Union[str, Any] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = tmp_path / 'csv_with_image.csv'
lowerCamelCase__ : Any = textwrap.dedent(
f'''\
image
{image_file}
''' )
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Dict = tmp_path / 'csv_with_label.csv'
lowerCamelCase__ : Tuple = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Dict = tmp_path / 'csv_with_int_list.csv'
lowerCamelCase__ : Tuple = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : str = Csv()
lowerCamelCase__ : int = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCamelCase , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(_lowerCamelCase ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase_ ( _lowerCamelCase ):
with open(_lowerCamelCase , encoding='utf-8' ) as f:
lowerCamelCase__ : List[Any] = f.read().splitlines()[1]
lowerCamelCase__ : str = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
lowerCamelCase__ : Dict = csv._generate_tables([[csv_file_with_image]] )
lowerCamelCase__ : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
lowerCamelCase__ : str = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase_ ( _lowerCamelCase ):
with open(_lowerCamelCase , encoding='utf-8' ) as f:
lowerCamelCase__ : str = f.read().splitlines()[1:]
lowerCamelCase__ : Dict = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
lowerCamelCase__ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] )
lowerCamelCase__ : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
lowerCamelCase__ : Dict = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def lowerCamelCase_ ( _lowerCamelCase ):
    lowerCamelCase__ : Union[str, Any] = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x: [int(i ) for i in x.split()]} )
lowerCamelCase__ : Union[str, Any] = csv._generate_tables([[csv_file_with_int_list]] )
lowerCamelCase__ : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
lowerCamelCase__ : List[Any] = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
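# Hedged sketch of the converter mechanism the test above relies on: the Csv
# builder forwards `converters` to `pandas.read_csv`, so the same lambda can be
# checked against pandas directly. The inline CSV text is mine, for illustration.
import io

import pandas as pd

_csv_text = 'int_list\n1 2 3\n4 5 6\n7 8 9\n'
_df = pd.read_csv(io.StringIO(_csv_text ) , converters={'int_list': lambda x: [int(i ) for i in x.split()]} )
assert _df['int_list'].tolist() == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]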
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str ) -> None:
    single_char_strings, two_char_strings = analyze_text(text )
    my_alphas = list(' ' + ascii_lowercase )
    # total number of single characters, used to turn counts into probabilities.
    all_sum = sum(single_char_strings.values() )
    # first-order (single-character) entropy accumulator
    my_fir_sum = 0
    # for each letter of the alphabet, add its entropy contribution if it occurs
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # second-order (two-character) statistics
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each two-character sequence, add its entropy contribution if it occurs.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text(text: str ) -> tuple[Counter, Counter]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[' ' + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
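# A small worked check of the entropy formula above: for the text "aab",
# p(a) = 2/3 and p(b) = 1/3, so H = -(2/3 * log2(2/3) + 1/3 * log2(1/3)) ≈ 0.918.
def first_order_entropy(text: str ) -> float:
    counts = Counter(text )
    total = sum(counts.values() )
    return -sum((c / total) * math.log2(c / total ) for c in counts.values() )

assert abs(first_order_entropy('aab' ) - 0.9182958340544896 ) < 1e-9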
| 696 | 0 |
"""simple docstring"""
import math
import os
import sys
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : str = ""
try:
with open(__lowerCAmelCase , 'rb' ) as binary_file:
lowerCamelCase__ : Tuple = binary_file.read()
for dat in data:
lowerCamelCase__ : str = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def add_key_to_lexicon(lexicon: dict , curr_string: str , index: int , last_match_id: str ) -> None:
    lexicon.pop(curr_string )
    lexicon[curr_string + '0'] = last_match_id
    if math.log2(index ).is_integer():
        # the code width grew by one bit: left-pad every existing code with '0'
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + '1'] = bin(index )[2:]
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = {"0": "0", "1": "1"}
lowerCamelCase__ : Optional[int] = "", ""
lowerCamelCase__ : int = len(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowerCamelCase__ : Tuple = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
index += 1
lowerCamelCase__ : Any = ""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
lowerCamelCase__ : List[str] = lexicon[curr_string]
result += last_match_id
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = os.path.getsize(__lowerCAmelCase )
lowerCamelCase__ : List[str] = bin(__lowerCAmelCase )[2:]
lowerCamelCase__ : Dict = len(__lowerCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 8
try:
with open(__lowerCAmelCase , 'wb' ) as opened_file:
lowerCamelCase__ : Optional[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__lowerCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
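# Hedged sketch of the byte-padding rule used in write_file_binary above: the
# bit stream is terminated with a single '1' bit and zero-filled to a byte
# boundary, so a decoder can strip the padding unambiguously. The helper name
# is mine, for illustration only.
def _pad_to_bytes(bits: str , byte_length: int = 8 ) -> str:
    if len(bits ) % byte_length == 0:
        return bits + '1' + '0' * (byte_length - 1 )
    return bits + '1' + '0' * (byte_length - len(bits ) % byte_length - 1 )

assert _pad_to_bytes('10101' ) == '10101100'
assert len(_pad_to_bytes('10101' ) ) % 8 == 0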
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[str] = read_file_binary(__lowerCAmelCase )
lowerCamelCase__ : Tuple = compress_data(__lowerCAmelCase )
lowerCamelCase__ : Union[str, Any] = add_file_length(__lowerCAmelCase , __lowerCAmelCase )
write_file_binary(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 718 |
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
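# Worked example from the problem statement: once the list is sorted, COLIN is
# the 938th name and scores 3 + 15 + 12 + 9 + 14 = 53, so it contributes
# 938 * 53 = 49714 to the total.
assert sum(ord(letter ) - 64 for letter in 'COLIN' ) == 53
assert 938 * 53 == 49_714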
| 696 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
SPIECE_UNDERLINE = "▁"
class TaTokenizer( PreTrainedTokenizer ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Dict = ["input_ids", "attention_mask"]
def __init__(self, lowerCamelCase_, lowerCamelCase_="</s>", lowerCamelCase_="<unk>", lowerCamelCase_="<pad>", lowerCamelCase_=1_0_0, lowerCamelCase_=None, lowerCamelCase_ = None, lowerCamelCase_=True, **lowerCamelCase_, ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowerCamelCase__ : Tuple = [f'''<extra_id_{i}>''' for i in range(lowerCAmelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowerCamelCase__ : List[Any] = len(set(filter(lambda lowerCamelCase_ : bool('extra_id' in str(lowerCAmelCase_ ) ), lowerCAmelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
lowerCamelCase__ : Any = legacy
lowerCamelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase_, unk_token=lowerCAmelCase_, pad_token=lowerCAmelCase_, extra_ids=lowerCAmelCase_, additional_special_tokens=lowerCAmelCase_, sp_model_kwargs=self.sp_model_kwargs, legacy=lowerCAmelCase_, **lowerCAmelCase_, )
lowerCamelCase__ : Dict = vocab_file
lowerCamelCase__ : Union[str, Any] = extra_ids
lowerCamelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowerCamelCase__ : str = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.', lowerCAmelCase_, )
return max_model_length
@property
def a__ (self ):
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_, token_ids_a=lowerCAmelCase_, already_has_special_tokens=lowerCAmelCase_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCAmelCase_ )) + [1]
return ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
def a__ (self ):
'''simple docstring'''
return list(
set(filter(lambda lowerCamelCase_ : bool(re.search(r'<extra_id_\d+>', lowerCAmelCase_ ) ) is not None, self.additional_special_tokens ) ) )
def a__ (self ):
'''simple docstring'''
return [self._convert_token_to_id(lowerCAmelCase_ ) for token in self.get_sentinel_tokens()]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if len(lowerCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Dict = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Dict = self._add_eos_if_not_present(lowerCAmelCase_ )
if token_ids_a is None:
return token_ids_a
else:
lowerCamelCase__ : Tuple = self._add_eos_if_not_present(lowerCAmelCase_ )
return token_ids_a + token_ids_a
def __getstate__(self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.__dict__.copy()
lowerCamelCase__ : str = None
return state
def __setstate__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
lowerCamelCase__ : Any = {}
lowerCamelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ (self, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if not self.legacy:
lowerCamelCase__ : List[str] = SPIECE_UNDERLINE + text.replace(lowerCAmelCase_, ' ' )
return super().tokenize(lowerCAmelCase_, **lowerCAmelCase_ )
def a__ (self, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if not self.legacy:
lowerCamelCase__ : int = text.startswith(lowerCAmelCase_ )
if is_first:
lowerCamelCase__ : List[Any] = text[1:]
lowerCamelCase__ : Tuple = self.sp_model.encode(lowerCAmelCase_, out_type=lowerCAmelCase_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(lowerCAmelCase_ ):
lowerCamelCase__ : Union[str, Any] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if token.startswith('<extra_id_' ):
lowerCamelCase__ : Tuple = re.match(r'<extra_id_(\d+)>', lowerCAmelCase_ )
lowerCamelCase__ : List[str] = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowerCAmelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
lowerCamelCase__ : Union[str, Any] = self.sp_model.IdToPiece(lowerCAmelCase_ )
else:
lowerCamelCase__ : int = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : Optional[Any] = ''
lowerCamelCase__ : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
lowerCamelCase__ : str = True
lowerCamelCase__ : Optional[Any] = []
else:
current_sub_tokens.append(lowerCAmelCase_ )
lowerCamelCase__ : str = False
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : Any = os.path.join(
lowerCAmelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_, 'wb' ) as fi:
lowerCamelCase__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
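# Hedged sketch of the sentinel-id arithmetic used in _convert_token_to_id
# above: `<extra_id_N>` maps to `vocab_size - N - 1`, so the sentinels occupy
# the top of the vocabulary in reverse order. The sizes below assume the
# standard t5-small setup (32_000 SentencePiece pieces plus 100 extra ids).
def _sentinel_token_to_id(token: str , vocab_size: int = 32_100 ) -> int:
    num = int(re.match(r'<extra_id_(\d+)>' , token ).group(1 ) )
    return vocab_size - num - 1

assert _sentinel_token_to_id('<extra_id_0>' ) == 32_099
assert _sentinel_token_to_id('<extra_id_99>' ) == 32_000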
| 719 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
| 696 | 0 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A_ : int = get_tests_dir("fixtures")
A_ : List[Any] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
A_ : str = get_tests_dir("fixtures/dummy-config.json")
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = 0
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(__a, __a )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a, __a )
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop('feature_extractor_type' )
lowerCamelCase__ : str = WavaVecaFeatureExtractor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
lowerCamelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
lowerCamelCase__ : Any = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(__a, __a )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a, __a )
def a__ (self ):
'''simple docstring'''
with self.assertRaisesRegex(
__a, 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained('bert-base' )
def a__ (self ):
'''simple docstring'''
with self.assertRaisesRegex(
__a, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase__ : str = AutoFeatureExtractor.from_pretrained(__a, revision='aaaaaa' )
def a__ (self ):
'''simple docstring'''
with self.assertRaisesRegex(
__a, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ):
lowerCamelCase__ : int = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(__a ):
lowerCamelCase__ : int = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
lowerCamelCase__ : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=__a )
lowerCamelCase__ : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
lowerCamelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(__a, trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
def a__ (self ):
'''simple docstring'''
try:
AutoConfig.register('custom', __a )
AutoFeatureExtractor.register(__a, __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a, __a )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase__ : Tuple = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
lowerCamelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a, __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def a__ (self ):
'''simple docstring'''
class a_ ( __lowercase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = True
try:
AutoConfig.register('custom', __a )
AutoFeatureExtractor.register(__a, __a )
# If remote code is not set, the default is to use local
lowerCamelCase__ : int = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
lowerCamelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
lowerCamelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
self.assertTrue(not hasattr(__a, 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
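# Non-executable outline, distilled from the registration test above, of how a
# custom feature extractor plugs into the auto classes; `MyConfig` and
# `MyFeatureExtractor` are hypothetical stand-ins for the test's custom classes.
#
#   AutoConfig.register("custom", MyConfig)
#   AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
#   feature_extractor = MyFeatureExtractor.from_pretrained(checkpoint_dir)
#   feature_extractor.save_pretrained(tmp_dir)
#   reloaded = AutoFeatureExtractor.from_pretrained(tmp_dir)  # -> MyFeatureExtractor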
| 720 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
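# Hedged numeric illustration of the ratio-based tolerance used above: with
# values spanning many orders of magnitude, |a - b| is dominated by the largest
# entries, while a / b treats every scale uniformly. The sample values are mine.
def _ratio_check_demo():
    a = torch.tensor([1.0e8, 2.0e0, -3.0e-1] )
    b = a * (1.0 + 5.0e-4 )  # 0.05% relative error at every scale
    assert torch.all((a / b >= 1 - TOLERANCE) & (a / b <= 1 + TOLERANCE) )
    assert torch.abs(a - b ).max() > 1.0  # absolute error looks huge at 1e8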
| 696 | 0 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def lowerCamelCase_ ( _lowerCamelCase=32 , _lowerCamelCase=10 , _lowerCamelCase=100 , _lowerCamelCase=1026 , _lowerCamelCase=True , _lowerCamelCase="data/tokenized_stories_train_wikitext103.jbl" , _lowerCamelCase="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
lowerCamelCase__ , lowerCamelCase__ : Dict = generate_datasets(
_UpperCamelCase , _UpperCamelCase , number=_UpperCamelCase , min_len=1026 , trim=_UpperCamelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowerCamelCase__ : int = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# load pretrained model
lowerCamelCase__ : Union[str, Any] = load_gpta('gpt2' ).to(_UpperCamelCase )
print('computing perplexity on objective set' )
lowerCamelCase__ : int = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).item()
print('perplexity on objective set:' , _UpperCamelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=15 , _lowerCamelCase=128 , _lowerCamelCase=100 , _lowerCamelCase="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
lowerCamelCase__ : Optional[int] = GPTaLMHeadModel.from_pretrained('gpt2' )
# Initialize secondary learner to use embedding weights of model
lowerCamelCase__ : int = SecondaryLearner(_UpperCamelCase )
# Train secondary learner
lowerCamelCase__ : Any = train_secondary_learner(
_UpperCamelCase , _UpperCamelCase , max_epochs=_UpperCamelCase , batch_size=_UpperCamelCase , eval_freq=100 , igf_model_path=_UpperCamelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=32 , _lowerCamelCase=1000 , _lowerCamelCase=16 , _lowerCamelCase=1.0 , _lowerCamelCase=recopy_gpta , _lowerCamelCase=None , _lowerCamelCase=10 , _lowerCamelCase="gpt2_finetuned.pt" , ):
lowerCamelCase__ : Union[str, Any] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
lowerCamelCase__ : int = RandomSampler(_UpperCamelCase )
lowerCamelCase__ : Any = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase )
lowerCamelCase__ : str = max_steps // (len(_UpperCamelCase )) + 1
lowerCamelCase__ : str = 0
lowerCamelCase__ : Optional[Any] = torch.zeros((1, context_len) , dtype=torch.long , device=_UpperCamelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = recopy_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(_UpperCamelCase )
secondary_learner.eval()
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : Dict = []
# Compute the performance of the transformer model at the beginning
lowerCamelCase__ : int = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
test_perps.append(_UpperCamelCase )
print('Test perplexity, step' , _UpperCamelCase , ':' , _UpperCamelCase )
for epoch in range(int(_UpperCamelCase ) ):
for step, example in enumerate(_UpperCamelCase ):
torch.cuda.empty_cache()
lowerCamelCase__ : Optional[int] = random.randint(0 , example.size(2 ) - context_len - 1 )
lowerCamelCase__ : Dict = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowerCamelCase__ : List[Any] = model(_UpperCamelCase , labels=_UpperCamelCase )
lowerCamelCase__ : Any = True
if secondary_learner is not None:
lowerCamelCase__ : List[str] = secondary_learner.forward(
torch.tensor(_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_UpperCamelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowerCamelCase__ : List[str] = -1
if predicted_q < threshold:
lowerCamelCase__ : Union[str, Any] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowerCamelCase__ : Dict = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowerCamelCase__ : Union[str, Any] = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowerCamelCase__ : Tuple = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
test_perps.append(_UpperCamelCase )
print('Test perplexity, step' , _UpperCamelCase , ':' , _UpperCamelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _UpperCamelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
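# Hedged sketch of the decaying-threshold idea described in the comments above:
# start one standard deviation above the mean of the observed IG predictions and
# move linearly to one below over the first `decay_steps` batches. The schedule
# and names are my reconstruction of the comment, not the exact IGF code (which
# simply drops the threshold to -1 at step 10).
def _decayed_threshold(observed_qs , step , decay_steps=10 ):
    qs = np.asarray(observed_qs , dtype=np.float64 )
    frac = min(step / decay_steps , 1.0 )  # 0 -> 1 over decay_steps
    return qs.mean() + (1.0 - 2.0 * frac) * qs.std()  # +1 sigma down to -1 sigma

assert _decayed_threshold([0.0, 1.0] , step=0 ) == 1.0  # mean 0.5 + std 0.5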
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[str] = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=_UpperCamelCase , default=_UpperCamelCase , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=_UpperCamelCase , default=_UpperCamelCase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=_UpperCamelCase , type=_UpperCamelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=_UpperCamelCase , default=_UpperCamelCase , help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' , default=32 , type=_UpperCamelCase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=_UpperCamelCase , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=_UpperCamelCase , help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' , default=1000 , type=_UpperCamelCase , help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=_UpperCamelCase , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=_UpperCamelCase , help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval' , default=10 , type=_UpperCamelCase , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=_UpperCamelCase , help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' , default=1026 , type=_UpperCamelCase , help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=_UpperCamelCase , help='number of epochs to train secondary learner' )
parser.add_argument('--trim' , default=_UpperCamelCase , type=_UpperCamelCase , help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' , default=1.0 , type=_UpperCamelCase , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=_UpperCamelCase , help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' , default=_UpperCamelCase , type=_UpperCamelCase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=_UpperCamelCase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
lowerCamelCase__ : str = joblib.load('data/IGF_values.jbl' )
# Train secondary learner
lowerCamelCase__ : Any = training_secondary_learner(
_UpperCamelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
lowerCamelCase__ : List[str] = GPTaLMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowerCamelCase__ , lowerCamelCase__ : Tuple = generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=_UpperCamelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=_UpperCamelCase , secondary_learner=_UpperCamelCase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
| 721 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset( IterableDataset ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without strip(), the model generates commented-out code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria( StoppingCriteria ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def remove_last_block(string ):
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS ) , string )
    # last string should be ""
    return ''.join(string_list[:-2] )
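# Hedged illustration of remove_last_block above: generation is cut at the first
# stop string (e.g. "\nclass") and everything from that marker on is dropped,
# keeping only the completed function body. The sample completion is mine.
_example = 'def add(a, b):\n    return a + b\nclass Foo:\n    pass'
assert remove_last_block(_example ) == 'def add(a, b):\n    return a + b'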
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : str = []
for line in lines:
lowerCamelCase__ : Any = re.sub(r'#.*' , '' , _lowerCamelCase ) # remove comments
if line:
filtered_lines.append(_lowerCamelCase )
lowerCamelCase__ : str = '\n'.join(_lowerCamelCase )
# Make a hash from all this code
lowerCamelCase__ : Any = full_str.encode('utf-8' )
return shaaaa(_lowerCamelCase ).hexdigest()
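# Illustrative behaviour (a sketch, not part of the original module): comment-only
# and blank lines are dropped before hashing, so the following two calls are
# expected to return the same digest:
#   _hash_python_lines(["# a comment", "x = 1"]) == _hash_python_lines(["x = 1"])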
# get importable module names and hash for caching
A_ : str = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
A_ : str = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
A_ : Any = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
A_ : Any = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 700 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
| 696 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
A_ : List[str] = None
A_ : Tuple = logging.get_logger(__name__)
A_ : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
A_ : Optional[Any] = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}
A_ : Dict = {
"moussaKam/mbarthez": 10_24,
"moussaKam/barthez": 10_24,
"moussaKam/barthez-orangesum-title": 10_24,
}
A_ : Tuple = "▁"
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = VOCAB_FILES_NAMES
lowerCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Optional[Any] = ['input_ids', 'attention_mask']
lowerCamelCase__ : int = BarthezTokenizer
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_="<s>", lowerCamelCase_="</s>", lowerCamelCase_="</s>", lowerCamelCase_="<s>", lowerCamelCase_="<unk>", lowerCamelCase_="<pad>", lowerCamelCase_="<mask>", **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = AddedToken(lowerCamelCase_, lstrip=lowerCamelCase_, rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_, lowerCamelCase_ ) else mask_token
super().__init__(
lowerCamelCase_, tokenizer_file=lowerCamelCase_, bos_token=lowerCamelCase_, eos_token=lowerCamelCase_, unk_token=lowerCamelCase_, sep_token=lowerCamelCase_, cls_token=lowerCamelCase_, pad_token=lowerCamelCase_, mask_token=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : Optional[Any] = vocab_file
lowerCamelCase__ : List[Any] = False if not self.vocab_file else True
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : Optional[int] = [self.cls_token_id]
lowerCamelCase__ : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
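        # Resulting layout (BARThez follows the CamemBERT convention): a single
        # sequence becomes "<s> A </s>", a pair becomes "<s> A </s></s> B </s>".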
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [self.sep_token_id]
lowerCamelCase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : Dict = os.path.join(
lowerCamelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file, lowerCamelCase_ )
return (out_vocab_file,)
| 701 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
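# Worked example (small input): factorial(10) == 3628800 and
# split_and_add(3628800) == 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27,
# so solution(10) should return 27.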
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
A_ : Any = "examples/"
A_ : int = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
A_ : Optional[Any] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
A_ : List[Any] = "README.md"
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
with open(lowercase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCamelCase__ : Optional[Any] = f.read()
lowerCamelCase__ , lowerCamelCase__ : int = REPLACE_PATTERNS[pattern]
lowerCamelCase__ : int = replace.replace('VERSION' , lowercase__ )
lowerCamelCase__ : int = re_pattern.sub(lowercase__ , lowercase__ )
with open(lowercase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(lowercase__ )
def lowerCamelCase_ ( _lowerCamelCase ):
for folder, directories, fnames in os.walk(lowercase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(lowercase__ , lowercase__ ) , lowercase__ , pattern='examples' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowercase__ , lowercase__ , lowercase__ )
if not patch:
update_version_in_examples(lowercase__ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Union[str, Any] = '🤗 Transformers currently provides the following architectures'
lowerCamelCase__ : Any = '1. Want to contribute a new model?'
with open(lowercase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCamelCase__ : int = f.readlines()
# Find the start of the list.
lowerCamelCase__ : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCamelCase__ : Optional[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
lowerCamelCase__ : List[str] = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(lowercase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowercase__ )
def lowerCamelCase_ ( ):
with open(REPLACE_FILES['init'] , 'r' ) as f:
lowerCamelCase__ : str = f.read()
lowerCamelCase__ : str = REPLACE_PATTERNS['init'][0].search(lowercase__ ).groups()[0]
return packaging.version.parse(lowercase__ )
def lowerCamelCase_ ( _lowerCamelCase=False ):
lowerCamelCase__ : Any = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
lowerCamelCase__ : int = default_version.base_version
elif patch:
lowerCamelCase__ : Union[str, Any] = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowerCamelCase__ : Optional[int] = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowerCamelCase__ : int = input(f'''Which version are you releasing? [{default_version}]''' )
if len(lowercase__ ) == 0:
lowerCamelCase__ : Optional[Any] = default_version
print(f'''Updating version to {version}.''' )
global_version_update(lowercase__ , patch=lowercase__ )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def lowerCamelCase_ ( ):
lowerCamelCase__ : int = get_version()
lowerCamelCase__ : Dict = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowerCamelCase__ : Optional[int] = current_version.base_version
# Check with the user we got that right.
lowerCamelCase__ : List[Any] = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(lowercase__ ) == 0:
lowerCamelCase__ : str = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(lowercase__ )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
A_ : List[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 702 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
lowerCamelCase__ : Dict = get_aligned_output_features_output_indices(lowercase__, lowercase__, lowercase__ )
self.assertEqual(lowercase__, ['c'] )
self.assertEqual(lowercase__, [2] )
# Out indices set to match out features
lowerCamelCase__ : str = get_aligned_output_features_output_indices(['a', 'c'], lowercase__, lowercase__ )
self.assertEqual(lowercase__, ['a', 'c'] )
self.assertEqual(lowercase__, [0, 2] )
# Out features set to match out indices
lowerCamelCase__ : Any = get_aligned_output_features_output_indices(lowercase__, [0, 2], lowercase__ )
self.assertEqual(lowercase__, ['a', 'c'] )
self.assertEqual(lowercase__, [0, 2] )
# Out features selected from negative indices
lowerCamelCase__ : Optional[int] = get_aligned_output_features_output_indices(lowercase__, [-3, -1], lowercase__ )
self.assertEqual(lowercase__, ['a', 'c'] )
self.assertEqual(lowercase__, [-3, -1] )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(['a', 'b'], (0, 1), lowercase__ )
# Out features must be a list
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(('a', 'b'), (0, 1), ['a', 'b'] )
# Out features must be a subset of stage names
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(['a', 'b'], (0, 1), ['a'] )
# Out indices must be a list or tuple
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(lowercase__, 0, ['a', 'b'] )
# Out indices must be a subset of stage names
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(lowercase__, (0, 1), ['a'] )
# Out features and out indices must be the same length
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(['a', 'b'], (0,), ['a', 'b', 'c'] )
# Out features should match out indices
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(['a', 'b'], (0, 2), ['a', 'b', 'c'] )
# Out features and out indices should be in order
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(['b', 'a'], (0, 1), ['a', 'b'] )
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'], (0, 1, -1), ['a', 'b', 'c', 'd'] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BackboneMixin()
lowerCamelCase__ : List[Any] = ['''a''', '''b''', '''c''']
lowerCamelCase__ : Union[str, Any] = ['''a''', '''c''']
lowerCamelCase__ : Any = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features, ['a', 'c'] )
self.assertEqual(backbone.out_indices, [0, 2] )
# Check out features and indices are updated correctly
lowerCamelCase__ : List[str] = ['''a''', '''b''']
self.assertEqual(backbone.out_features, ['a', 'b'] )
self.assertEqual(backbone.out_indices, [0, 1] )
lowerCamelCase__ : Dict = [-3, -1]
self.assertEqual(backbone.out_features, ['a', 'c'] )
self.assertEqual(backbone.out_indices, [-3, -1] )
| 703 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
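# Worked example: surface_area_cone(3, 4) uses slant height
# (4**2 + 3**2) ** 0.5 == 5.0, giving pi * 3 * (3 + 5) == 24 * pi (~75.40).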
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
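# Worked example: surface_area_torus(2, 1) == 4 * pi**2 * 2 * 1 (~78.96).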
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
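# Worked example (Heron's formula): area_triangle_three_sides(3, 4, 5) has
# semi-perimeter 6, so the area is sqrt(6 * 3 * 2 * 1) == 6.0.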
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
    if not isinstance(sides , int ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
A_ : List[Any] = logging.get_logger(__name__)
# General docstring
A_ : str = "RegNetConfig"
# Base docstring
A_ : Union[str, Any] = "facebook/regnet-y-040"
A_ : Any = [1, 10_88, 7, 7]
# Image classification docstring
A_ : Optional[Any] = "facebook/regnet-y-040"
A_ : Union[str, Any] = "tabby, tabby cat"
A_ : Optional[Any] = [
"facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class a_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = 3, lowerCamelCase_ = 1, lowerCamelCase_ = 1, lowerCamelCase_ = "relu", **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowercase__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCamelCase__ : Dict = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowerCamelCase__ : List[Any] = tf.keras.layers.ConvaD(
filters=lowercase__, kernel_size=lowercase__, strides=lowercase__, padding='VALID', groups=lowercase__, use_bias=lowercase__, name='convolution', )
lowerCamelCase__ : str = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization' )
lowerCamelCase__ : List[str] = ACTaFN[activation] if activation is not None else tf.identity
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.convolution(self.padding(lowercase__ ) )
lowerCamelCase__ : Tuple = self.normalization(lowercase__ )
lowerCamelCase__ : int = self.activation(lowercase__ )
return hidden_state
class a_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(**lowercase__ )
lowerCamelCase__ : Optional[Any] = config.num_channels
lowerCamelCase__ : Optional[int] = TFRegNetConvLayer(
out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name='embedder', )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = shape_list(lowercase__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCamelCase__ : Union[str, Any] = tf.transpose(lowercase__, perm=(0, 2, 3, 1) )
lowerCamelCase__ : Optional[Any] = self.embedder(lowercase__ )
return hidden_state
class a_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = 2, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(**lowercase__ )
lowerCamelCase__ : Dict = tf.keras.layers.ConvaD(
filters=lowercase__, kernel_size=1, strides=lowercase__, use_bias=lowercase__, name='convolution' )
lowerCamelCase__ : Any = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization' )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = False ):
'''simple docstring'''
return self.normalization(self.convolution(lowercase__ ), training=lowercase__ )
class a_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(**lowercase__ )
lowerCamelCase__ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase__, name='pooler' )
lowerCamelCase__ : str = [
tf.keras.layers.ConvaD(filters=lowercase__, kernel_size=1, activation='relu', name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowercase__, kernel_size=1, activation='sigmoid', name='attention.2' ),
]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
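        # Squeeze-and-Excitation: global-average-pool to a (batch, 1, 1, channels)
        # descriptor (NHWC layout is used internally), gate it through the
        # relu/sigmoid 1x1 convolutions, then rescale the input feature map.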
lowerCamelCase__ : int = self.pooler(lowercase__ )
for layer_module in self.attention:
lowerCamelCase__ : Union[str, Any] = layer_module(lowercase__ )
lowerCamelCase__ : List[Any] = hidden_state * pooled
return hidden_state
class a_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(**lowercase__ )
lowerCamelCase__ : str = in_channels != out_channels or stride != 1
lowerCamelCase__ : Any = max(1, out_channels // config.groups_width )
lowerCamelCase__ : int = (
TFRegNetShortCut(lowercase__, stride=lowercase__, name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear', name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCamelCase__ : str = [
TFRegNetConvLayer(lowercase__, kernel_size=1, activation=config.hidden_act, name='layer.0' ),
TFRegNetConvLayer(
lowercase__, stride=lowercase__, groups=lowercase__, activation=config.hidden_act, name='layer.1' ),
TFRegNetConvLayer(lowercase__, kernel_size=1, activation=lowercase__, name='layer.2' ),
]
lowerCamelCase__ : Any = ACTaFN[config.hidden_act]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = hidden_state
for layer_module in self.layers:
lowerCamelCase__ : Tuple = layer_module(lowercase__ )
lowerCamelCase__ : List[Any] = self.shortcut(lowercase__ )
hidden_state += residual
lowerCamelCase__ : List[Any] = self.activation(lowercase__ )
return hidden_state
class a_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(**lowercase__ )
lowerCamelCase__ : str = in_channels != out_channels or stride != 1
lowerCamelCase__ : List[Any] = max(1, out_channels // config.groups_width )
lowerCamelCase__ : Optional[Any] = (
TFRegNetShortCut(lowercase__, stride=lowercase__, name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear', name='shortcut' )
)
lowerCamelCase__ : Tuple = [
TFRegNetConvLayer(lowercase__, kernel_size=1, activation=config.hidden_act, name='layer.0' ),
TFRegNetConvLayer(
lowercase__, stride=lowercase__, groups=lowercase__, activation=config.hidden_act, name='layer.1' ),
TFRegNetSELayer(lowercase__, reduced_channels=int(round(in_channels / 4 ) ), name='layer.2' ),
TFRegNetConvLayer(lowercase__, kernel_size=1, activation=lowercase__, name='layer.3' ),
]
lowerCamelCase__ : Optional[Any] = ACTaFN[config.hidden_act]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = hidden_state
for layer_module in self.layers:
lowerCamelCase__ : Optional[int] = layer_module(lowercase__ )
lowerCamelCase__ : List[str] = self.shortcut(lowercase__ )
hidden_state += residual
lowerCamelCase__ : List[Any] = self.activation(lowercase__ )
return hidden_state
class a_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 2, lowerCamelCase_ = 2, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(**lowercase__ )
lowerCamelCase__ : Any = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
lowerCamelCase__ : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(lowercase__, lowercase__, lowercase__, stride=lowercase__, name='layers.0' ),
*[layer(lowercase__, lowercase__, lowercase__, name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
for layer_module in self.layers:
lowerCamelCase__ : str = layer_module(lowercase__ )
return hidden_state
class a_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(**lowercase__ )
lowerCamelCase__ : List[str] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase__, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name='stages.0', ) )
lowerCamelCase__ : Optional[int] = zip(config.hidden_sizes, config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase__, config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase__, lowercase__, lowercase__, depth=lowercase__, name=f'''stages.{i+1}''' ) )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = False, lowerCamelCase_ = True ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCamelCase__ : Dict = hidden_states + (hidden_state,)
lowerCamelCase__ : List[str] = stage_module(lowercase__ )
if output_hidden_states:
lowerCamelCase__ : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase__, hidden_states=lowercase__ )
@keras_serializable
class a_ ( tf.keras.layers.Layer ):
'''simple docstring'''
lowerCamelCase__ : Dict = RegNetConfig
def __init__(self, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(**lowercase__ )
lowerCamelCase__ : Tuple = config
lowerCamelCase__ : Optional[int] = TFRegNetEmbeddings(lowercase__, name='embedder' )
lowerCamelCase__ : Tuple = TFRegNetEncoder(lowercase__, name='encoder' )
lowerCamelCase__ : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase__, name='pooler' )
@unpack_inputs
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = False, ):
'''simple docstring'''
lowerCamelCase__ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ : Optional[int] = self.embedder(lowercase__, training=lowercase__ )
lowerCamelCase__ : List[Any] = self.encoder(
lowercase__, output_hidden_states=lowercase__, return_dict=lowercase__, training=lowercase__ )
lowerCamelCase__ : Optional[Any] = encoder_outputs[0]
lowerCamelCase__ : Optional[Any] = self.pooler(lowercase__ )
        # Change to NCHW output format to have uniformity across the modules
lowerCamelCase__ : Optional[int] = tf.transpose(lowercase__, perm=(0, 3, 1, 2) )
lowerCamelCase__ : List[Any] = tf.transpose(lowercase__, perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCamelCase__ : Any = tuple([tf.transpose(lowercase__, perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase__, pooler_output=lowercase__, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class a_ ( _UpperCAmelCase ):
'''simple docstring'''
lowerCamelCase__ : Any = RegNetConfig
lowerCamelCase__ : List[Any] = """regnet"""
lowerCamelCase__ : Tuple = """pixel_values"""
@property
def a__ (self ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4), dtype=tf.floataa )}
A_ : Optional[int] = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
A_ : Dict = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , _UpperCAmelCase , )
class a_ ( _UpperCAmelCase ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowercase__, *lowercase__, **lowercase__ )
lowerCamelCase__ : Tuple = TFRegNetMainLayer(lowercase__, name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC, output_type=lowercase__, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_=False, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ : Dict = self.regnet(
pixel_values=lowercase__, output_hidden_states=lowercase__, return_dict=lowercase__, training=lowercase__, )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , _UpperCAmelCase , )
class a_ ( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowercase__, *lowercase__, **lowercase__ )
lowerCamelCase__ : Optional[int] = config.num_labels
lowerCamelCase__ : List[str] = TFRegNetMainLayer(lowercase__, name='regnet' )
# classification head
lowerCamelCase__ : Dict = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels, name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=lowercase__, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
def a__ (self, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_=False, ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ : List[Any] = self.regnet(
lowercase__, output_hidden_states=lowercase__, return_dict=lowercase__, training=lowercase__ )
lowerCamelCase__ : List[Any] = outputs.pooler_output if return_dict else outputs[1]
lowerCamelCase__ : Tuple = self.classifier[0](lowercase__ )
lowerCamelCase__ : Optional[int] = self.classifier[1](lowercase__ )
lowerCamelCase__ : Any = None if labels is None else self.hf_compute_loss(labels=lowercase__, logits=lowercase__ )
if not return_dict:
lowerCamelCase__ : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase__, logits=lowercase__, hidden_states=outputs.hidden_states )
| 704 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
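        # These tests reuse the last vocabulary id as the padding id; the
        # sequence-classification head relies on pad_token_id to locate the
        # last non-padding token in each row.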
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 | 0 |
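A minimal standalone sketch (not part of the dataset row above) of the KV-cache consistency pattern the GPT-NeoX test exercises: logits computed with `past_key_values` should match a full forward pass over the concatenated sequence. It assumes a recent `transformers` install; the tiny config values are arbitrary.

import torch
from transformers import GPTNeoXConfig, GPTNeoXForCausalLM

# Tiny randomly initialized model: enough to exercise the cache code path.
config = GPTNeoXConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
model = GPTNeoXForCausalLM(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 7))
next_tokens = torch.randint(0, config.vocab_size, (1, 3))

with torch.no_grad():
    # First pass returns the cache; the second pass reuses it for new tokens.
    past = model(input_ids, use_cache=True).past_key_values
    logits_from_past = model(next_tokens, past_key_values=past).logits
    # Reference: one uncached pass over the full sequence, last 3 positions.
    logits_full = model(torch.cat([input_ids, next_tokens], dim=-1)).logits[:, -3:]

assert torch.allclose(logits_from_past, logits_full, atol=1e-3)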
"""simple docstring"""
# Imports
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None ):
'''simple docstring'''
self.set_matricies(red=__snake_case, green=__snake_case, blue=__snake_case, red_edge=__snake_case, nir=__snake_case )
def a__ (self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None ):
'''simple docstring'''
if red is not None:
lowerCamelCase__ : Union[str, Any] = red
if green is not None:
lowerCamelCase__ : Tuple = green
if blue is not None:
lowerCamelCase__ : Optional[Any] = blue
if red_edge is not None:
lowerCamelCase__ : Any = red_edge
if nir is not None:
lowerCamelCase__ : List[Any] = nir
return True
def a__ (self, lowerCamelCase_="", lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None ):
'''simple docstring'''
self.set_matricies(red=__snake_case, green=__snake_case, blue=__snake_case, red_edge=__snake_case, nir=__snake_case )
lowerCamelCase__ : List[Any] = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def a__ (self ):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def a__ (self ):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def a__ (self ):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def a__ (self ):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def a__ (self ):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def a__ (self ):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def a__ (self ):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def a__ (self ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def a__ (self ):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def a__ (self ):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def a__ (self ):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def a__ (self ):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def a__ (self, lowerCamelCase_=0.08, lowerCamelCase_=1.22, lowerCamelCase_=0.03 ):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def a__ (self ):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def a__ (self ):
'''simple docstring'''
return (self.nir / self.green) - 1
def a__ (self ):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def a__ (self ):
'''simple docstring'''
return (self.red - self.blue) / self.red
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def a__ (self ):
'''simple docstring'''
return self.nir - self.green
def a__ (self ):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def a__ (self, lowerCamelCase_=0.16 ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def a__ (self, lowerCamelCase_=0.5 ):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def a__ (self ):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def a__ (self, lowerCamelCase_=None, lowerCamelCase_=None ):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def a__ (self ):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def a__ (self ):
'''simple docstring'''
return (self.red + self.green + self.blue) / 3_0.5
def a__ (self ):
'''simple docstring'''
return self.nir / self.red
def a__ (self ):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def a__ (self ):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def a__ (self ):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def a__ (self ):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def a__ (self ):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def a__ (self ):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def a__ (self ):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCamelCase__ : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def a__ (self ):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def a__ (self ):
'''simple docstring'''
return self.nir / self.red
def a__ (self ):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def a__ (self ):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 705 |
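A hedged usage sketch for the band-index class above (`a_` is the dataset's obfuscated class name): each NDVI-style method reduces to elementwise arithmetic on equal-shaped NumPy arrays, shown here directly.

import numpy as np

# Toy red and near-infrared reflectance bands of the same shape.
red = np.array([[50.0, 60.0], [70.0, 80.0]])
nir = np.array([[120.0, 130.0], [140.0, 150.0]])

# Same formula as the class's `ndvi` method: (NIR - R) / (NIR + R).
ndvi = (nir - red) / (nir + red)
print(ndvi)  # values in (-1, 1]; higher means denser vegetation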
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 696 | 0 |
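The docstring check above hinges on one regex; this standalone sketch (independent of the repo layout) shows how it pairs a checkpoint name with its hub link and why the two must agree.

import re

# Same pattern as the script above: a markdown link to a huggingface.co page.
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

doc = "Example docstring citing [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
for ckpt_name, ckpt_link in _re_checkpoint.findall(doc):
    # The checkpoint is valid only if the link is exactly https://huggingface.co/<name>.
    assert ckpt_link == f"https://huggingface.co/{ckpt_name}"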
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Tuple = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False ):
lowerCamelCase__ : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
for i in range(config.num_hidden_layers ):
lowerCamelCase__ : Tuple = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ : Optional[Any] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
lowerCamelCase__ : Optional[Any] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : List[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase__ : Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase__ : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ : List[Any] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Tuple = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Dict = dct.pop(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = val
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[str] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=_lowerCamelCase )
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : int = False
lowerCamelCase__ : int = False
if "vqa" in checkpoint_url:
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : str = 3129
lowerCamelCase__ : Optional[Any] = 'huggingface/label-files'
lowerCamelCase__ : Optional[int] = 'vqa2-id2label.json'
lowerCamelCase__ : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Union[str, Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : List[str] = idalabel
lowerCamelCase__ : Any = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : Union[str, Any] = ViltForQuestionAnswering(_lowerCamelCase )
elif "nlvr" in checkpoint_url:
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Tuple = 2
lowerCamelCase__ : List[str] = {0: 'False', 1: 'True'}
lowerCamelCase__ : Tuple = {v: k for k, v in config.idalabel.items()}
lowerCamelCase__ : List[Any] = 3
lowerCamelCase__ : Dict = ViltForImagesAndTextClassification(_lowerCamelCase )
elif "irtr" in checkpoint_url:
lowerCamelCase__ : str = True
lowerCamelCase__ : Optional[Any] = ViltForImageAndTextRetrieval(_lowerCamelCase )
elif "mlm_itm" in checkpoint_url:
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : Any = ViltForMaskedLM(_lowerCamelCase )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowerCamelCase__ : Any = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location='cpu' )['state_dict']
lowerCamelCase__ : List[Any] = create_rename_keys(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase )
if mlm_model or irtr_model:
lowerCamelCase__ : Optional[int] = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(_lowerCamelCase )
# Define processor
lowerCamelCase__ : List[str] = ViltImageProcessor(size=384 )
lowerCamelCase__ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowerCamelCase__ : Tuple = ViltProcessor(_lowerCamelCase , _lowerCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCamelCase__ : Any = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=_lowerCamelCase ).raw )
lowerCamelCase__ : Optional[int] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=_lowerCamelCase ).raw )
lowerCamelCase__ : List[str] = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowerCamelCase__ : Dict = processor(_lowerCamelCase , _lowerCamelCase , return_tensors='pt' )
lowerCamelCase__ : Union[str, Any] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors='pt' )
lowerCamelCase__ : Tuple = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCamelCase__ : Optional[int] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=_lowerCamelCase ).raw )
if mlm_model:
lowerCamelCase__ : Optional[int] = 'a bunch of [MASK] laying on a [MASK].'
else:
lowerCamelCase__ : Union[str, Any] = 'How many cats are there?'
lowerCamelCase__ : Union[str, Any] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors='pt' )
lowerCamelCase__ : Optional[int] = model(**_lowerCamelCase )
# Verify outputs
if mlm_model:
lowerCamelCase__ : str = torch.Size([1, 11, 3_0522] )
lowerCamelCase__ : Union[str, Any] = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCamelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
lowerCamelCase__ : str = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCamelCase__ : Optional[int] = torch.Size([1, 3129] )
lowerCamelCase__ : Optional[int] = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCamelCase , atol=1e-4 )
# verify vqa prediction equals "2"
lowerCamelCase__ : str = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCamelCase__ : Union[str, Any] = torch.Size([1, 2] )
lowerCamelCase__ : List[str] = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A_ : List[str] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 706 |
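The conversion script above maps timm-style keys onto HF ViLT keys by popping each tensor and reinserting it under the new name; a minimal standalone sketch of that pattern:

import torch

# A one-entry fake state dict and one (source, destination) rename pair.
state_dict = {"transformer.norm.weight": torch.ones(3)}
rename_keys = [("transformer.norm.weight", "vilt.layernorm.weight")]

for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)  # same effect as `rename_key` above

assert "vilt.layernorm.weight" in state_dict
assert "transformer.norm.weight" not in state_dict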
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 696 | 0 |
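For context, a hedged sketch of what the lazy structure above buys: importing the package stays cheap, and the heavy submodule is only loaded on first attribute access. It assumes a `transformers` version that ships Llama.

from transformers import LlamaConfig  # resolved on access via _LazyModule

# Config objects are lightweight containers; no weights are loaded here.
config = LlamaConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
print(config.model_type)  # "llama"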
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class a_ ( _UpperCamelCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = "EncodecFeatureExtractor"
lowerCamelCase__ : Union[str, Any] = ("T5Tokenizer", "T5TokenizerFast")
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(__a, __a )
lowerCamelCase__ : str = self.feature_extractor
lowerCamelCase__ : int = False
def a__ (self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=True ):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=__a, language=__a, no_timestamps=__a )
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__a, **__a )
lowerCamelCase__ : Optional[int] = kwargs.pop('audio', __a )
lowerCamelCase__ : str = kwargs.pop('sampling_rate', __a )
lowerCamelCase__ : str = kwargs.pop('text', __a )
if len(__a ) > 0:
lowerCamelCase__ : Dict = args[0]
lowerCamelCase__ : Union[str, Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if text is not None:
lowerCamelCase__ : List[str] = self.tokenizer(__a, **__a )
if audio is not None:
lowerCamelCase__ : Optional[Any] = self.feature_extractor(__a, *__a, sampling_rate=__a, **__a )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCamelCase__ : Dict = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
lowerCamelCase__ : Optional[Any] = audio_inputs["padding_mask"]
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = kwargs.pop('audio', __a )
lowerCamelCase__ : List[Any] = kwargs.pop('padding_mask', __a )
if len(__a ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__a, padding_mask=__a )
else:
return self.tokenizer.batch_decode(*__a, **__a )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*__a, **__a )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : str = to_numpy(__a )
lowerCamelCase__ : Optional[Any] = audio_values.shape
if padding_mask is None:
return list(__a )
lowerCamelCase__ : Optional[int] = to_numpy(__a )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCamelCase__ : List[str] = seq_len - padding_mask.shape[-1]
lowerCamelCase__ : List[str] = 1 - self.feature_extractor.padding_value
lowerCamelCase__ : Optional[Any] = np.pad(__a, ((0, 0), (0, difference)), 'constant', constant_values=__a )
lowerCamelCase__ : Union[str, Any] = audio_values.tolist()
for i in range(__a ):
lowerCamelCase__ : Optional[int] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCamelCase__ : Any = sliced_audio.reshape(__a, -1 )
return audio_values
| 707 |
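`_decode_audio` above trims each generated waveform with its padding mask; a small standalone sketch of that masking step, assuming a padding value of 0 as in the default feature extractor:

import numpy as np

audio_values = np.array([[0.1, 0.2, 0.3, 0.0, 0.0]])  # one padded waveform
padding_mask = np.array([[1, 1, 1, 0, 0]])            # 1 = real sample

# Keep only the positions where the mask differs from the padding value.
trimmed = audio_values[0][padding_mask[0] != 0]
print(trimmed)  # [0.1 0.2 0.3]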
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 0 |
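A hedged sketch of the same requests + BeautifulSoup pattern against a stable page; Google's `.eZt8xd` class is not a stable selector, so the snippet above can silently return an empty list when the markup changes.

import requests
from bs4 import BeautifulSoup

res = requests.get("https://example.com", timeout=10)
res.raise_for_status()
soup = BeautifulSoup(res.text, "html.parser")
# Generic anchor selector instead of a scraped CSS class.
print([a.get("href") for a in soup.select("a")][:5])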
"""simple docstring"""
from itertools import permutations
def lowerCamelCase_ ( _lowerCamelCase ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCamelCase__ : Union[str, Any] = [7, 11, 13, 17]
for i, test in enumerate(__UpperCamelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowerCamelCase_ ( _lowerCamelCase = 10 ):
return sum(
int(''.join(map(__UpperCamelCase , __UpperCamelCase ) ) )
for num in permutations(range(__UpperCamelCase ) )
if is_substring_divisible(__UpperCamelCase ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 708 |
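A quick standalone check of the divisibility rule implemented above, using 1406357289, one of the known Project Euler 43 pandigitals:

num = "1406357289"
primes = [2, 3, 5, 7, 11, 13, 17]
# d2d3d4 must be divisible by 2, d3d4d5 by 3, ..., d8d9d10 by 17.
assert all(int(num[i + 1 : i + 4]) % p == 0 for i, p in enumerate(primes))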
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 696 | 0 |
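The Flax tests above all follow the same pmap input recipe; a hedged sketch of `replicate`/`shard` on toy data (output shapes depend on the local device count):

import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": np.ones((2, 2))}
batch = np.arange(jax.device_count() * 4)

replicated = replicate(params)  # adds a leading device axis to every leaf
sharded = shard(batch)          # reshapes to (devices, per-device batch, ...)
print(sharded.shape)            # (jax.device_count(), 4)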