"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
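# The tester below follows the common ModelTester pattern used across the
# transformers test suite: it builds a small random config plus dummy inputs,
# and each `create_and_check_*` helper instantiates one head class and checks
# the shapes of its outputs against the tester's dimensions.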
class FlaubertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        summary_type="last", use_proj=None, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
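# The test case below combines the generic checks from ModelTesterMixin and
# PipelineTesterMixin with Flaubert-specific ones; `pipeline_model_mapping`
# tells the shared pipeline tests which head class backs each pipeline task.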
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
# --- file: CamemBERT tokenizer tests ---
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
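# The fixture paths above point at tiny SentencePiece models shipped with the
# test suite, used in place of the real CamemBERT vocabulary so the unit tests
# stay fast and offline.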
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
# --- file: TVLT feature extractor tests ---
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
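# `floats_list` produces plain nested lists on purpose, so the tests below can
# exercise both the list and the numpy input paths of the feature extractor.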
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048,
        feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
# --- file: Nat (Neighborhood Attention Transformer) configuration ---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
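# `attribute_map` below translates the library-standard `num_attention_heads` /
# `num_hidden_layers` config names to Nat's native `num_heads` / `num_layers`
# attributes, so generic code can read either spelling.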
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16],
        kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
        initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0,
        out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
# --- file: singly linked list loop detection ---
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """A loop exists when the exact same node appears more than once while walking the list."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
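# `has_loop` keeps a `visited` list, so detection costs O(n) extra memory and
# O(n^2) node comparisons. A minimal alternative sketch (not part of the
# original module) using Floyd's tortoise-and-hare algorithm runs in O(1)
# extra memory:
def has_loop_floyd(head: Node) -> bool:
    """Return True if the list starting at `head` contains a cycle."""
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False

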
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = Node(1)
UpperCAmelCase_ : List[Any] = Node(2)
UpperCAmelCase_ : Optional[int] = Node(3)
UpperCAmelCase_ : List[Any] = Node(4)
print(root_node.has_loop) # False
UpperCAmelCase_ : str = root_node.next_node
print(root_node.has_loop) # True
UpperCAmelCase_ : int = Node(5)
UpperCAmelCase_ : Dict = Node(6)
UpperCAmelCase_ : Any = Node(5)
UpperCAmelCase_ : Union[str, Any] = Node(6)
print(root_node.has_loop) # False
UpperCAmelCase_ : Union[str, Any] = Node(1)
print(root_node.has_loop) # False
# --- file: CLIPSeg processor tests ---
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # create a single random image as a numpy array, then convert it to PIL
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
# --- file: masked image modeling (SimMIM-style) pretraining example script ---
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(default=0.6, metadata={"help": "Percentage of patches to mask."})
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}
    )
class MaskGenerator:
    """
    Generates a random boolean mask over image patches for masked image modeling.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
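# Worked example of the arithmetic above (illustrative): with the defaults
# input_size=192, mask_patch_size=32, model_patch_size=4, we get
# rand_size = 192 // 32 = 6 (a 6x6 grid of maskable patches, 36 tokens),
# scale = 32 // 4 = 8, and mask_count = ceil(36 * 0.6) = 22; the returned flat
# mask therefore has (6 * 8) ** 2 = 2304 entries, one per model patch.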
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        # apply the SimMIM transforms and attach one freshly sampled mask per image
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
# --- file: PIL image conversion utilities ---
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
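# `pt_to_pil` below assumes model outputs normalized to [-1, 1]: the
# `images / 2 + 0.5` step maps that range to [0, 1] before the uint8 conversion.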
def pt_to_pil(images):
    """Convert a torch image batch to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
# --- file: Perceiver module __init__ (lazy imports) ---
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
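# Lazy-import pattern: `_import_structure` only names the submodules and their
# public symbols; the actual imports are deferred to first attribute access via
# `_LazyModule` at the bottom of the file, while the TYPE_CHECKING branch keeps
# static type checkers and IDEs working.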
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- file: documentation doctest runner ---
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, (list, tuple)):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
def __a ( self ) -> int:
lowerCAmelCase_ = Path("src/transformers" )
lowerCAmelCase_ = "modeling"
lowerCAmelCase_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(_a , identifier=_a , ignore_files=_a )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = Path("src/transformers" )
lowerCAmelCase_ = "tokenization"
self.analyze_directory(_a , identifier=_a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = Path("src/transformers" )
lowerCAmelCase_ = "configuration"
self.analyze_directory(_a , identifier=_a )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = Path("src/transformers" )
lowerCAmelCase_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(_a , n_identifier=_a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = Path("docs/source" )
lowerCAmelCase_ = ["favicon.ico"]
self.analyze_directory(_a , ignore_files=_a , only_modules=_a )
| 122 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}

            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
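# --- Hedged usage sketch (added; not part of the original file) ---
# These helpers are normally driven by `Accelerator.save_state`/`load_state`, but
# they can be called directly from a prepared FSDP training run, roughly like this
# (`MyModel` is a hypothetical stand-in; the run is assumed to be started with an
# FSDP config, e.g. via `accelerate launch`):
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     model = accelerator.prepare(MyModel())
#     fsdp_plugin = accelerator.state.fsdp_plugin
#     save_fsdp_model(fsdp_plugin, accelerator, model, "checkpoints/")
#     load_fsdp_model(fsdp_plugin, accelerator, model, "checkpoints/")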
| 708 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 108 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 98 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval `DataLoader`s for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
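# --- Hedged usage note (added; not part of the original script) ---
# This example is meant to be started through the Accelerate CLI rather than plain
# `python`, so the same file runs on CPU, multi-GPU or TPU, e.g.
# (file name illustrative; use whatever this script is saved as):
#
#     accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16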
| 15 | 0 |
'''simple docstring'''
def solution(n: int = 10) -> str:
    """Return the last `n` digits of 28433 * 2**7830457 + 1 (Project Euler 97)."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
| 432 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse the edge-list file into {node: [[neighbour, distance], ...]}."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the file's first character."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """All 2-swap neighbours of `solution`, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

            if len(tabu_list) >= size:
                tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
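# --- Hedged usage note (added; not part of the original script) ---
# The input file is expected to be a weighted edge list, one edge per line as
# "<node> <node> <distance>", whose very first character names the start node, e.g.
#
#     a b 20
#     a c 18
#     b c 10
#
# Run (file and value names illustrative):
#
#     python tabu_search.py -f edges.txt -i 100 -s 5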
| 432 | 1 |
"""simple docstring"""
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        """Print each vertex followed by its adjacency list."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
    # 0 1 2 3
 | 516 |
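# --- Hedged usage sketch (added; not part of the original script) ---
# Typical invocation for extracting a student checkpoint (paths illustrative):
#
#     python extract.py --model_type roberta --model_name roberta-large \
#         --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform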
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[param_name] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
 | 516 | 1 |
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: repeatedly take the largest denomination that fits."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take this denomination while it still fits
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Union[str, Any] = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
lowerCAmelCase_ : Any = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(f"""Denomination {i}: """).strip()))
lowerCAmelCase_ : int = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase_ : Union[str, Any] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
lowerCAmelCase_ : Optional[int] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(f"""Following is minimal change for {value}: """)
lowerCAmelCase_ : Union[str, Any] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
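# Hedged worked example (added; not in the original): with the default Indian
# denominations,
#     find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2], which sums back to 987.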
 | 204 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown on a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 204 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 554 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
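# --- Hedged usage sketch (added; not part of the original file) ---
# Round-tripping a batch through the autoencoder; with the default single-block
# config the spatial shape should be preserved:
#
#     import torch
#     from diffusers import VQModel
#
#     model = VQModel()
#     sample = torch.randn(1, 3, 32, 32)
#     reconstruction = model(sample).sample  # same (1, 3, 32, 32) shape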
| 554 | 1 |
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image


class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels

        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 458 |
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to an int whose decimal digits spell its binary form."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
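# Hedged examples (added; not in the original). Note the function returns an int
# whose decimal digits spell the binary representation:
#     hex_to_bin("AC")   ->  10101100
#     hex_to_bin("-fe")  ->  -11111110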
| 458 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 114 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Union[str, Any] ="xlm"
lowerCamelCase__ : Any ={
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
    def __init__( self , vocab_size=30145 , emb_dim=2048 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=512 , embed_init_std=2048**-0.5 , layer_norm_eps=1e-12 , init_std=0.02 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ) -> Dict:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig(OnnxConfig):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
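# Hedged usage sketch (not part of the original file): build a small randomly
# initialized XLM model from this config. `XLMModel` is assumed from the public
# transformers API.
#
# from transformers import XLMModel
# config = XLMConfig(vocab_size=30145, emb_dim=512, n_layers=4, n_heads=8)
# model = XLMModel(config)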
| 154 | 0 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
__A : int = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=30_522, type=int)
    args = parser.parse_args()
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
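    # Downstream, these counts are typically turned into smoothed token-masking
    # probabilities for MLM (cf. XLM/word2vec). A hedged sketch, assuming the
    # distillation recipe's smoothing exponent of 0.7:
    #
    # import numpy as np
    # token_probs = np.maximum(counts, 1) ** -0.7  # rare tokens get masked more often
    # token_probs = token_probs / token_probs.sum()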
| 334 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
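# Thanks to the `_LazyModule` wiring above, heavy submodules are only imported
# on first attribute access. Hedged usage sketch:
#
# from transformers.models.gpt_neo import GPTNeoConfig  # cheap, config only
# from transformers.models.gpt_neo import GPTNeoModel   # triggers the torch import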
| 334 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    """simple docstring"""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config):
    """simple docstring"""
    # apply OCR on a single image
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
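# Worked example of the 0-1000 normalization above: a 50x20 pixel box at
# (10, 10) inside a 200x100 image maps to the LayoutLM coordinate space as
# normalize_box([10, 10, 60, 30], width=200, height=100) == [50, 100, 300, 300]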
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_value=1 / 255, do_normalize=True, image_mean=None, image_std=None, apply_ocr=True, ocr_lang=None, tesseract_config="", **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, apply_ocr=None, ocr_lang=None, tesseract_config=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
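# Hedged usage sketch (class name as reconstructed above; requires Pillow and
# pytesseract to be installed):
#
# from PIL import Image
# processor = LayoutLMv3ImageProcessor(apply_ocr=True)
# encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="np")
# encoding["pixel_values"].shape, encoding["words"], encoding["boxes"]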
| 93 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool( string : str ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
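    # Example invocation (script name and all paths are placeholders):
    #
    # python convert_controlnet_checkpoint.py \
    #   --checkpoint_path ./control_sd15_canny.pth \
    #   --original_config_file ./cldm_v15.yaml \
    #   --dump_path ./controlnet-canny \
    #   --to_safetensors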
| 20 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
 | 701 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
 | 678 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    """simple docstring"""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
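# The `sherman_morrison` method above implements the Sherman-Morrison identity:
# given A^{-1} (here `self`) and column vectors u, v,
#     (A + u v^T)^{-1} = A^{-1} - (A^{-1} u v^T A^{-1}) / (1 + v^T A^{-1} u),
# which is exactly `self - ((self * u) * (v_t * self)) * (1.0 / numerator_factor)`.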
# Testing
if __name__ == "__main__":
    def testa() -> None:
        """simple docstring"""
        # a^(-1) has to be for this implementation
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")
    def test_doctest() -> None:
        """simple docstring"""
        import doctest

        doctest.testmod()
testa()
| 104 |
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    """Returns the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
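# Example: the proper divisors of 28 are 1, 2, 4, 7 and 14, so
# sum_of_divisors(28) == 28, i.e. 28 is a perfect number.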
| 104 | 1 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies polynomial warmup before handing off to a given decay schedule."""

    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    """Creates an optimizer with a warmup phase followed by a polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
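# Hedged usage sketch: 10% linear warmup into a polynomial (here linear) decay,
# with decoupled weight decay on everything except LayerNorm weights and biases.
#
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01,
# )
# model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")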
class AdamWeightDecay(Adam):
"""simple docstring"""
    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking)
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several steps; call with gradients, read `.gradients`, then `reset()`."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
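# Hedged usage sketch inside a custom training loop (`compute_loss` and
# `accumulation_steps` are placeholder names, not part of this file):
#
# accumulator = GradientAccumulator()
# for step, batch in enumerate(dataset):
#     with tf.GradientTape() as tape:
#         loss = compute_loss(model, batch)
#     accumulator(tape.gradient(loss, model.trainable_variables))
#     if (step + 1) % accumulation_steps == 0:
#         optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#         accumulator.reset()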
| 704 |
import inspect
import unittest
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def _UpperCAmelCase ( self: Dict ) -> Dict:
'''simple docstring'''
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 286 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # The following common tests do not apply to RoFormer, so they are overridden.
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
 | 619 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
"""simple docstring"""
    csv_file: str = field(
        metadata={"help": "The csv file to plot."}, )
    plot_along_batch: bool = field(
        default=False, metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."}, )
    is_time: bool = field(
        default=False, metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."}, )
    no_log_scale: bool = field(
        default=False, metadata={"help": "Disable logarithmic scale when plotting"}, )
    is_train: bool = field(
        default=False, metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        }, )
    figure_png_file: Optional[str] = field(
        default=None, metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."}, )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False
def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
"""simple docstring"""
    def __init__(self, args):
        '''simple docstring'''
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = float(row["result"])
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
lowercase_ , lowercase_ = plt.subplots()
lowercase_ = """Time usage""" if self.args.is_time else """Memory usage"""
lowercase_ = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
lowercase_ = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
lowercase_ = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
lowercase_ = self.result_dict[model_name]["""result"""]
((lowercase_) , (lowercase_)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
lowercase_ = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
lowercase_ = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowercase_ , )
else:
lowercase_ = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((lowercase_) , (lowercase_)) = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
lowercase_ = np.asarray(lowercase_ , lowercase_ )[: len(lowercase_ )]
plt.scatter(
lowercase_ , lowercase_ , label=F"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" )
plt.plot(lowercase_ , lowercase_ , """--""" )
title_str += F""" {label_model_name} vs."""
lowercase_ = title_str[:-4]
lowercase_ = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(lowercase_ )
plt.xlabel(lowercase_ )
plt.ylabel(lowercase_ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def main() -> None:
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
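# The expected CSV layout, with column names taken from the DictReader above
# (the values shown are illustrative placeholders):
#
# model,batch_size,sequence_length,result
# bert-base-uncased,8,128,0.0123
# bert-base-uncased,8,512,0.0456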
| 451 | 0 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 346 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self: Tuple , __UpperCamelCase: Dict , __UpperCamelCase: int=7 , __UpperCamelCase: Any=400 , __UpperCamelCase: List[str]=2000 , __UpperCamelCase: Union[str, Any]=2048 , __UpperCamelCase: int=128 , __UpperCamelCase: Optional[int]=1 , __UpperCamelCase: Tuple=512 , __UpperCamelCase: List[Any]=30 , __UpperCamelCase: Dict=4_4100 , ):
_a = parent
_a = batch_size
_a = min_seq_length
_a = max_seq_length
_a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a = spectrogram_length
_a = feature_size
_a = num_audio_channels
_a = hop_length
_a = chunk_length
_a = sampling_rate
def _A ( self: int ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _A ( self: List[Any] , __UpperCamelCase: List[Any]=False , __UpperCamelCase: List[str]=False ):
def _flatten(__UpperCamelCase: Tuple ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
_a = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( __snake_case , unittest.TestCase ):
a: Union[str, Any] = TvltFeatureExtractor
def _A ( self: Optional[Any] ):
_a = TvltFeatureExtractionTester(self )
def _A ( self: Optional[Any] ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__UpperCamelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''feature_size''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''hop_length''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''sampling_rate''' ) )
def _A ( self: List[str] ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_a = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_a = feat_extract_first.to_dict()
_a = feat_extract_second.to_dict()
_a = dict_first.pop('''mel_filters''' )
_a = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _A ( self: List[str] ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(__UpperCamelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(__UpperCamelCase )
_a = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_a = feat_extract_first.to_dict()
_a = feat_extract_second.to_dict()
_a = dict_first.pop('''mel_filters''' )
_a = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _A ( self: List[str] ):
# Initialize feature_extractor
_a = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_a = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test not batched input
_a = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a = feature_extractor(__UpperCamelCase , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a = feature_extractor(
__UpperCamelCase , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=__UpperCamelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_a = np.asarray(__UpperCamelCase )
_a = feature_extractor(__UpperCamelCase , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _A ( self: Optional[int] , __UpperCamelCase: Dict ):
_a = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a = ds.sort('''id''' ).select(range(__UpperCamelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _A ( self: Optional[Any] ):
_a = self._load_datasamples(1 )
_a = TvltFeatureExtractor()
_a = feature_extractor(__UpperCamelCase , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
_a = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __UpperCamelCase , atol=1E-4 ) )
| 346 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
a_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 439 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Splits an input string into sentences, one per line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 439 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
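    # Each training sample is a sliding window: `look_back` past values as input
    # and the next `forward_days` values as the target. With look_back=10 and
    # forward_days=5, x_train[i] covers timesteps t..t+9 and y_train[i] covers
    # t+10..t+14, so the model learns a 5-step-ahead forecast.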
| 705 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase_ ( BaseImageProcessor ):
    r"""Image processor: resize to a shortest edge, center-crop, rescale and normalize."""
    model_input_names = ["pixel_values"]
    def __init__( self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale( self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
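# Usage sketch (assumes the class above plus a PIL image named `image`; names are
# illustrative). With the defaults, one image becomes a (1, 3, 224, 224) batch,
# since __call__ on a BaseImageProcessor dispatches to preprocess():
# processor = UpperCAmelCase_()
# batch = processor(images=image, return_tensors="np")
# batch["pixel_values"].shape  # (1, 3, 224, 224)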
| 76 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve: a weighted sum of a set of control points."""
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1
    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values
    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
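# Quick worked check (illustrative): for the degree-1 curve through (1, 2) and
# (3, 5), the Bernstein basis at t = 0.5 is [0.5, 0.5], so the curve point is
# the segment midpoint:
# BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5)  # -> (2.0, 3.5)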
| 107 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"
    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
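# Usage sketch (the kwargs are illustrative; the defaults above already match the
# base checkpoints listed in the archive map):
# config = XmodConfig(default_language="en_XX")
# config.model_type  # "xmod"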
| 497 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]
    def __init__( self, notes_encoder: SpectrogramNotesEncoder, continuous_encoder: SpectrogramContEncoder, decoder: TaFilmDecoder, scheduler: DDPMScheduler, melgan: OnnxRuntimeModel if is_onnx_available() else Any ):
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__( self, input_tokens: List[List[int]], generator: Optional[torch.Generator] = None, num_inference_steps: int = 100, return_dict: bool = True, output_type: str = "numpy", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1 ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device), continuous_inputs=encoder_continuous_inputs, continuous_mask=encoder_continuous_mask,
            )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape, generator=generator, device=self.device, dtype=self.decoder.dtype,
            )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks, input_tokens=x, noise_time=t / self.scheduler.config.num_train_timesteps,
                )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample
            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)
            logger.info("Generated segment", i)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output)
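# End-to-end usage sketch (illustrative; the checkpoint name is an assumption,
# and `processed_midi_tokens` stands for note-token chunks produced upstream):
# pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
# output = pipe(processed_midi_tokens, num_inference_steps=100)
# audio = output.audios[0]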
| 713 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset mapping into features and target.
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    # Load the Iris dataset and split it for training and evaluation.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
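# Worked example (sketch) of the data_handling helper above: it simply unpacks
# the "data" and "target" fields of the dataset mapping.
# data_handling({"data": [[5.1, 3.5, 1.4, 0.2]], "target": [0]})
# -> ([[5.1, 3.5, 1.4, 0.2]], [0])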
| 25 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str) -> None:
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
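# Example invocation (sketch; the file name and every path are placeholders):
# python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin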
| 202 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
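# Effect sketch of the lazy-module pattern above: heavy submodules are only
# imported when an attribute is first touched, e.g.
# from transformers import GPTBigCodeForCausalLM  # torch modeling code loads here,
#                                                 # not when the package is imported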
| 202 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim ):
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250_004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_0_2_4, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=2_5_0_0_0_4, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
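# Example invocation (sketch; the script file name and every path/value are
# placeholders, not from the original file):
# python convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py \
#     --checkpoint_path ./checkpoint_best.pt \
#     --dict_path ./dict.mbart50.txt \
#     --config_yaml_path ./config.yaml \
#     --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50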
| 701 | import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
lowerCAmelCase__ = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
lowerCAmelCase__ = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_0_0_0_0):
out_file.write(data)
lowerCAmelCase__ = BeautifulSoup(res.text, "html.parser")
lowerCAmelCase__ = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F'''https://google.com{link.get('href')}''')
| 594 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester( ConfigTester ):
"""simple docstring"""
def snake_case ( self: List[Any] ):
__UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a ,'hidden_sizes' ) )
self.parent.assertTrue(hasattr(a ,'num_attention_heads' ) )
class LevitModelTester:
"""simple docstring"""
    def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, is_training=True, use_labels=True, num_labels=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return LevitConfig(
image_size=self.image_size ,num_channels=self.num_channels ,kernel_size=self.kernel_size ,stride=self.stride ,padding=self.padding ,patch_size=self.patch_size ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,depths=self.depths ,key_dim=self.key_dim ,drop_path_rate=self.drop_path_rate ,mlp_ratio=self.mlp_ratio ,attention_ratio=self.attention_ratio ,initializer_range=self.initializer_range ,down_ops=self.down_ops ,)
def snake_case ( self: Optional[int] ,a: Optional[int] ,a: Any ,a: List[str] ):
__UpperCAmelCase = LevitModel(config=a )
model.to(a )
model.eval()
__UpperCAmelCase = model(a )
__UpperCAmelCase = (self.image_size, self.image_size)
__UpperCAmelCase , __UpperCAmelCase = image_size[0], image_size[1]
for _ in range(4 ):
__UpperCAmelCase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__UpperCAmelCase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) ,)
def snake_case ( self: Any ,a: Optional[Any] ,a: str ,a: Dict ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = LevitForImageClassification(a )
model.to(a )
model.eval()
__UpperCAmelCase = model(a ,labels=a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case ( self: Tuple ):
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class LevitModelTest( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
def snake_case ( self: str ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self: int ):
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def snake_case ( self: Tuple ):
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def snake_case ( self: str ):
pass
@unittest.skip(reason='Levit does not output attentions' )
def snake_case ( self: Optional[int] ):
pass
def snake_case ( self: Optional[Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(a )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,a )
def snake_case ( self: str ):
def check_hidden_states_output(a: Union[str, Any] ,a: Tuple ,a: Any ):
__UpperCAmelCase = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(a ,a ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = len(self.model_tester.depths ) + 1
self.assertEqual(len(a ) ,a )
__UpperCAmelCase = (self.model_tester.image_size, self.model_tester.image_size)
__UpperCAmelCase , __UpperCAmelCase = image_size[0], image_size[1]
for _ in range(4 ):
__UpperCAmelCase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__UpperCAmelCase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[
height * width,
self.model_tester.hidden_sizes[0],
] ,)
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(a ,a ,a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(a ,a ,a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case ( self: int ):
pass
def snake_case ( self: Union[str, Any] ,a: Dict ,a: Optional[Any] ,a: List[Any]=False ):
__UpperCAmelCase = super()._prepare_for_class(a ,a ,return_labels=a )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def snake_case ( self: int ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def snake_case ( self: Tuple ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
def snake_case ( self: Dict ):
if not self.model_tester.is_training:
return
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(a )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__UpperCAmelCase = model_class(a )
model.to(a )
model.train()
__UpperCAmelCase = self._prepare_for_class(a ,a ,return_labels=a )
__UpperCAmelCase = model(**a ).loss
loss.backward()
def snake_case ( self: Union[str, Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCAmelCase = False
__UpperCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(a ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__UpperCAmelCase = model_class(a )
model.gradient_checkpointing_enable()
model.to(a )
model.train()
__UpperCAmelCase = self._prepare_for_class(a ,a ,return_labels=a )
__UpperCAmelCase = model(**a ).loss
loss.backward()
def snake_case ( self: int ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
__UpperCAmelCase = problem_type['title']
__UpperCAmelCase = problem_type['num_labels']
__UpperCAmelCase = model_class(a )
model.to(a )
model.train()
__UpperCAmelCase = self._prepare_for_class(a ,a ,return_labels=a )
if problem_type["num_labels"] > 1:
__UpperCAmelCase = inputs['labels'].unsqueeze(1 ).repeat(1 ,problem_type['num_labels'] )
__UpperCAmelCase = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a ) as warning_list:
__UpperCAmelCase = model(**a ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def snake_case ( self: Dict ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = LevitModel.from_pretrained(a )
self.assertIsNotNone(a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self: int ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def snake_case ( self: Union[str, Any] ):
__UpperCAmelCase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
a )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(images=a ,return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
__UpperCAmelCase = model(**a )
# verify the logits
__UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,a )
__UpperCAmelCase = torch.tensor([1.0448, -0.3745, -1.8317] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,a ,atol=1e-4 ) )
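# Running this test module (sketch; the path follows the usual transformers test
# layout and is an assumption):
# python -m pytest tests/models/levit/test_modeling_levit.py -v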
| 396 | '''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters( self, return_tensors=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")
        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True
    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
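# Usage sketch for the three pipelines above (model selection is left to the
# pipeline factory; output keys follow the return_name attributes defined above):
# from transformers import pipeline
# summarizer = pipeline("summarization")
# summarizer("Long article ...", max_length=60)[0]["summary_text"]
# translator = pipeline("translation_en_to_fr")
# translator("How are you?")[0]["translation_text"]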
| 396 | 1 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"
    def __init__( self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = "owlvit_vision_model"
def __init__(self , _lowercase=768 , _lowercase=3072 , _lowercase=12 , _lowercase=12 , _lowercase=3 , _lowercase=768 , _lowercase=32 , _lowercase="quick_gelu" , _lowercase=1e-5 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1.0 , **_lowercase , ):
'''simple docstring'''
super().__init__(**_lowercase )
__a : Any = hidden_size
__a : Tuple = intermediate_size
__a : str = num_hidden_layers
__a : int = num_attention_heads
__a : int = num_channels
__a : List[Any] = image_size
__a : int = patch_size
__a : Any = hidden_act
__a : int = layer_norm_eps
__a : Union[str, Any] = attention_dropout
__a : str = initializer_range
__a : Union[str, Any] = initializer_factor
@classmethod
def lowerCAmelCase__(cls , _lowercase , **_lowercase ):
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
__a : List[str] = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
__a : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowercase , **_lowercase )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = "owlvit"
_lowerCAmelCase = True
def __init__(self , _lowercase=None , _lowercase=None , _lowercase=512 , _lowercase=2.6592 , _lowercase=True , **_lowercase , ):
'''simple docstring'''
super().__init__(**_lowercase )
if text_config is None:
__a : Union[str, Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
__a : Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
__a : Tuple = OwlViTTextConfig(**_lowercase )
__a : Dict = OwlViTVisionConfig(**_lowercase )
__a : int = projection_dim
__a : List[str] = logit_scale_init_value
__a : List[str] = return_dict
__a : Any = 1.0
@classmethod
def lowerCAmelCase__(cls , _lowercase , **_lowercase ):
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
__a : List[Any] = cls.get_config_dict(_lowercase , **_lowercase )
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowercase , **_lowercase )
@classmethod
def lowerCAmelCase__(cls , _lowercase , _lowercase , **_lowercase ):
'''simple docstring'''
__a : Any = {}
__a : int = text_config
__a : Any = vision_config
return cls.from_dict(_lowercase , **_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = copy.deepcopy(self.__dict__ )
__a : List[str] = self.text_config.to_dict()
__a : Union[str, Any] = self.vision_config.to_dict()
__a : List[str] = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 1e-4
def lowerCAmelCase__(self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = None , ):
'''simple docstring'''
__a : Optional[int] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_lowercase , seq_length=_lowercase , framework=_lowercase )
__a : Optional[int] = super().generate_dummy_inputs(
processor.image_processor , batch_size=_lowercase , framework=_lowercase )
return {**text_input_dict, **image_input_dict}
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 14
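
# Minimal sketch of composing the composite config from its parts, mirroring
# `from_text_vision_configs` above (the asserted values are the class defaults):
#
#   text_config = OwlViTTextConfig()
#   vision_config = OwlViTVisionConfig()
#   config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
#   assert config.text_config.hidden_size == 512
#   assert config.projection_dim == 512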
| 707 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
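
# Example wiring of the helpers above. The device choice and the dummy input
# are assumptions for illustration; the default checkpoint paths must exist
# for this to run.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device)  # falls back to ./model_checkpoints/vqgan_only.{yaml,pt}
    dummy_images = torch.randn(1, 3, 256, 256, device=device)  # stand-in image batch
    _ = reconstruct_with_vqgan(dummy_images, vqgan)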
| 63 | 0 |
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
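
# Sketch of how the tool is driven once instantiated (the agents framework
# normally orchestrates this; shown only for orientation):
#
#   tool = TextToSpeechTool()
#   audio = tool("Hello world")  # PipelineTool.__call__ chains encode/forward/decode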
| 303 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = """gpt_bigcode"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
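
# Quick self-check of the `attribute_map` indirection above: `hidden_size` is
# an alias for `n_embd` and `num_hidden_layers` for `n_layer`.
#
#   config = GPTBigCodeConfig(n_embd=2048, n_layer=24)
#   assert config.hidden_size == 2048
#   assert config.num_hidden_layers == 24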
| 123 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=5_12,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def parse_bool(string):
    '''simple docstring'''
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
args = parser.parse_args()
controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
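# Example invocation (the script name and all paths are placeholders; only
# --checkpoint_path, --original_config_file and --dump_path are required):
#
#   python convert_controlnet.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors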
| 231 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 1_6000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : Any = self.get_feature_extractor()
lowerCamelCase : Dict = self.get_decoder()
lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCamelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(__A , "include" ):
WavaVecaProcessorWithLM(
tokenizer=__A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.get_feature_extractor()
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_decoder()
lowerCamelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
lowerCamelCase : List[str] = floats_list((3, 1000) )
lowerCamelCase : str = feature_extractor(__A , return_tensors="np" )
lowerCamelCase : Union[str, Any] = processor(__A , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = self.get_feature_extractor()
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = self.get_decoder()
lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
lowerCamelCase : List[str] = "This is a test string"
lowerCamelCase : List[str] = processor(text=__A )
lowerCamelCase : Tuple = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self , __A=(2, 10, 16) , __A=77 ):
"""simple docstring"""
np.random.seed(__A )
return np.random.rand(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_feature_extractor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : Any = self.get_decoder()
lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
lowerCamelCase : int = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowerCamelCase : Optional[int] = processor.decode(__A )
lowerCamelCase : List[str] = decoder.decode_beams(__A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : str = self.get_feature_extractor()
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : str = self.get_decoder()
lowerCamelCase : int = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
lowerCamelCase : List[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCamelCase : Optional[Any] = processor.batch_decode(__A )
else:
with get_context(__A ).Pool() as pool:
lowerCamelCase : List[str] = processor.batch_decode(__A , __A )
lowerCamelCase : Optional[int] = list(__A )
with get_context("fork" ).Pool() as p:
lowerCamelCase : Optional[int] = decoder.decode_beams_batch(__A , __A )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__A , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(__A , decoded_processor.logit_score )
self.assertListEqual(__A , decoded_processor.lm_score )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_feature_extractor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_decoder()
lowerCamelCase : Tuple = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
lowerCamelCase : Any = self._get_dummy_logits()
lowerCamelCase : Union[str, Any] = 15
lowerCamelCase : List[Any] = -20.0
lowerCamelCase : Tuple = -4.0
lowerCamelCase : List[Any] = processor.batch_decode(
__A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
lowerCamelCase : Optional[Any] = decoded_processor_out.text
lowerCamelCase : Dict = list(__A )
with get_context("fork" ).Pool() as pool:
lowerCamelCase : Tuple = decoder.decode_beams_batch(
__A , __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
lowerCamelCase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
lowerCamelCase : Tuple = [d[0][2] for d in decoded_decoder_out]
lowerCamelCase : List[str] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __A )
self.assertTrue(np.array_equal(__A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __A , atol=1e-3 ) )
self.assertTrue(np.array_equal(__A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __A , atol=1e-3 ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.get_feature_extractor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = self.get_decoder()
lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
lowerCamelCase : Optional[Any] = self._get_dummy_logits()
lowerCamelCase : Union[str, Any] = 2.0
lowerCamelCase : Optional[int] = 5.0
lowerCamelCase : Tuple = -20.0
lowerCamelCase : int = True
lowerCamelCase : Tuple = processor.batch_decode(
__A , alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
lowerCamelCase : Dict = decoded_processor_out.text
lowerCamelCase : Dict = list(__A )
decoder.reset_params(
alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
with get_context("fork" ).Pool() as pool:
lowerCamelCase : Dict = decoder.decode_beams_batch(
__A , __A , )
lowerCamelCase : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , __A )
lowerCamelCase : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
lowerCamelCase : Dict = processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase : Tuple = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
lowerCamelCase : Tuple = os.listdir(__A )
lowerCamelCase : List[Any] = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__A , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = snapshot_download("hf-internal-testing/processor_with_lm" )
lowerCamelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__A )
lowerCamelCase : List[str] = processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase : Optional[Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
lowerCamelCase : List[str] = os.listdir(__A )
lowerCamelCase : Tuple = os.listdir(__A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__A , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
lowerCamelCase : Union[str, Any] = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
lowerCamelCase : Dict = floats_list((3, 1000) )
lowerCamelCase : List[Any] = processor_wavaveca(__A , return_tensors="np" )
lowerCamelCase : Optional[Any] = processor_auto(__A , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
lowerCamelCase : Optional[Any] = self._get_dummy_logits()
lowerCamelCase : Dict = processor_wavaveca.batch_decode(__A )
lowerCamelCase : int = processor_auto.batch_decode(__A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = self.get_feature_extractor()
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : List[Any] = self.get_decoder()
lowerCamelCase : Any = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def _snake_case ( __A , __A ):
"""simple docstring"""
lowerCamelCase : List[str] = [d[key] for d in offsets]
return retrieved_list
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
lowerCamelCase : str = self._get_dummy_logits()[0]
lowerCamelCase : List[Any] = processor.decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
lowerCamelCase : Dict = self._get_dummy_logits()
lowerCamelCase : int = processor.batch_decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(__A , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _snake_case ( self ):
"""simple docstring"""
import torch
lowerCamelCase : Union[str, Any] = load_dataset("common_voice" , "en" , split="train" , streaming=__A )
lowerCamelCase : Dict = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6000 ) )
lowerCamelCase : List[str] = iter(__A )
lowerCamelCase : Any = next(__A )
lowerCamelCase : List[str] = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
lowerCamelCase : List[str] = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCamelCase : List[Any] = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
lowerCamelCase : Optional[int] = model(__A ).logits.cpu().numpy()
lowerCamelCase : str = processor.decode(logits[0] , output_word_offsets=__A )
lowerCamelCase : List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCamelCase : Tuple = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
lowerCamelCase : int = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(__A , "word" ) ) , __A )
self.assertEqual(" ".join(self.get_from_offsets(__A , "word" ) ) , output.text )
# output times
lowerCamelCase : Tuple = torch.tensor(self.get_from_offsets(__A , "start_time" ) )
lowerCamelCase : Union[str, Any] = torch.tensor(self.get_from_offsets(__A , "end_time" ) )
# fmt: off
lowerCamelCase : str = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
lowerCamelCase : str = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
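# The multiprocessing pattern exercised above, in isolation. The checkpoint is
# the same testing repo the suite uses; `logits` stands for a (batch, time,
# vocab) array of CTC logits.
#
#   processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
#   with get_context("fork").Pool() as pool:
#       transcriptions = processor.batch_decode(logits, pool).text
#   # The pool must be created *after* the processor so the language model is
#   # visible to the worker processes.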
| 231 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = """nezha"""

    def __init__(
        self,
        vocab_size=2_1_1_2_8,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        max_relative_position=6_4,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ) -> List[str]:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
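
# Quick instantiation check (the asserted values are the defaults above):
#
#   config = NezhaConfig()
#   assert config.vocab_size == 21128
#   assert config.max_relative_position == 64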
| 607 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = """The small grey turtle was surprisingly fast when challenged."""
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''')

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
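
# Example: build a small throwaway dataset with the helpers above. The feature
# spec and sizes are illustrative.
if __name__ == "__main__":
    import os
    import tempfile

    features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
    with tempfile.TemporaryDirectory() as tmp_dir:
        dataset = generate_example_dataset(os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=10)
        print(dataset)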
| 607 | 1 |
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    '''simple docstring'''
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
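# Worked example: a purely resistive load of 100 V at 5 A with both angles at
# 0 degrees gives S = 100*5 = (500+0j) VA. With the voltage at +30 degrees and
# the current at -10 degrees the phases add, so apparent_power(100, 5, 30, -10)
# is 500 at 20 degrees, i.e. approximately (469.85 + 171.01j) VA.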
| 719 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class snake_case__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = (UniPCMultistepScheduler,)
SCREAMING_SNAKE_CASE__ = (('''num_inference_steps''', 25),)
def __lowerCAmelCase ( self : Tuple , **lowercase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : List[Any] = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**lowercase )
return config
def __lowerCAmelCase ( self : int , lowercase : List[str]=0 , **lowercase : str ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = dict(self.forward_default_kwargs )
UpperCAmelCase : Dict = kwargs.pop("num_inference_steps" , lowercase )
UpperCAmelCase : str = self.dummy_sample
UpperCAmelCase : Tuple = 0.1 * sample
UpperCAmelCase : int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : Optional[Any] = self.get_scheduler_config(**lowercase )
UpperCAmelCase : Tuple = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
UpperCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
UpperCAmelCase : Any = scheduler_class.from_pretrained(lowercase )
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
UpperCAmelCase : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase , UpperCAmelCase : List[Any] = sample, sample
for t in range(lowercase , time_step + scheduler.config.solver_order + 1 ):
UpperCAmelCase : List[str] = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase : List[Any] = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : int , lowercase : int=0 , **lowercase : Tuple ):
'''simple docstring'''
UpperCAmelCase : List[str] = dict(self.forward_default_kwargs )
UpperCAmelCase : Union[str, Any] = kwargs.pop("num_inference_steps" , lowercase )
UpperCAmelCase : Tuple = self.dummy_sample
UpperCAmelCase : Any = 0.1 * sample
UpperCAmelCase : int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : Optional[int] = self.get_scheduler_config()
UpperCAmelCase : List[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
UpperCAmelCase : str = scheduler_class.from_pretrained(lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase : Dict = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase : Any = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Union[str, Any] , lowercase : Optional[int]=None , **lowercase : int ):
'''simple docstring'''
if scheduler is None:
UpperCAmelCase : Dict = self.scheduler_classes[0]
UpperCAmelCase : int = self.get_scheduler_config(**lowercase )
UpperCAmelCase : str = scheduler_class(**lowercase )
UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase : Dict = self.get_scheduler_config(**lowercase )
UpperCAmelCase : int = scheduler_class(**lowercase )
UpperCAmelCase : List[str] = 10
UpperCAmelCase : Optional[Any] = self.dummy_model()
UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : List[str] = model(lowercase , lowercase )
UpperCAmelCase : Optional[Any] = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
return sample
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase : List[Any] = kwargs.pop("num_inference_steps" , lowercase )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : str = self.get_scheduler_config()
UpperCAmelCase : Dict = scheduler_class(**lowercase )
UpperCAmelCase : Any = self.dummy_sample
UpperCAmelCase : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase , "set_timesteps" ):
scheduler.set_timesteps(lowercase )
elif num_inference_steps is not None and not hasattr(lowercase , "set_timesteps" ):
UpperCAmelCase : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
UpperCAmelCase : Dict = dummy_past_residuals[: scheduler.config.solver_order]
UpperCAmelCase : str = scheduler.timesteps[5]
UpperCAmelCase : List[Any] = scheduler.timesteps[6]
UpperCAmelCase : Union[str, Any] = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase : int = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
UpperCAmelCase : Tuple = self.full_loop(scheduler=lowercase )
UpperCAmelCase : Any = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
UpperCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase : int = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase : Dict = self.full_loop(scheduler=lowercase )
UpperCAmelCase : Any = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowercase )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=lowercase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , solver_order=lowercase , solver_type=lowercase , )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , )
UpperCAmelCase : Union[str, Any] = self.full_loop(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , )
assert not torch.isnan(lowercase ).any(), "Samples have nan numbers"
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowercase )
self.check_over_configs(lower_order_final=lowercase )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowercase , time_step=0 )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.full_loop()
UpperCAmelCase : Any = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase : int = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.scheduler_classes[0]
UpperCAmelCase : int = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0 )
UpperCAmelCase : Tuple = scheduler_class(**lowercase )
UpperCAmelCase : Any = 10
UpperCAmelCase : Dict = self.dummy_model()
UpperCAmelCase : List[str] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : Any = model(lowercase , lowercase )
UpperCAmelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCAmelCase ( self : List[str] , **lowercase : Optional[int] ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(**lowercase )
UpperCAmelCase : Union[str, Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
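# Standalone sketch of the denoising loop these tests exercise. `model` is a
# stand-in for a noise-prediction network; shapes and step counts are
# illustrative.
#
#   scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)  # e.g. a UNet epsilon prediction
#       sample = scheduler.step(residual, t, sample).prev_sample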
| 292 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """instructblip_vision_model"""

    def __init__(
        self,
        hidden_size=14_08,
        intermediate_size=61_44,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=2_24,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1E-6,
        attention_dropout=0.0,
        initializer_range=1E-10,
        qkv_bias=True,
        **kwargs,
    ) -> List[str]:
        """simple docstring"""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("""model_type""") == "instructblip":
            config_dict = config_dict["""vision_config"""]

        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """instructblip_qformer"""

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=14_08,
        **kwargs,
    ) -> List[str]:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("""model_type""") == "instructblip":
            config_dict = config_dict["""qformer_config"""]

        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """instructblip"""
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs) -> Any:
        """simple docstring"""
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""")

        if qformer_config is None:
            qformer_config = {}
            logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""")

        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: "InstructBlipVisionConfig",
        qformer_config: "InstructBlipQFormerConfig",
        text_config: PretrainedConfig,
        **kwargs,
    ) -> List[Any]:
        """simple docstring"""
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self) -> Dict:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""qformer_config"""] = self.qformer_config.to_dict()
        output["""text_config"""] = self.text_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
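
# Minimal sketch mirroring `from_vision_qformer_text_configs` above. OPT is
# the documented default text backbone; the import is shown for completeness.
#
#   from transformers import OPTConfig
#   config = InstructBlipConfig.from_vision_qformer_text_configs(
#       InstructBlipVisionConfig(), InstructBlipQFormerConfig(), OPTConfig()
#   )
#   assert config.num_query_tokens == 32
#   assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size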
| 470 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """simple docstring"""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=""" """)
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=""" """)
            else:
                print(triangle[row_idx][col_idx], end="""""")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("""The input value of 'num_rows' should be 'int'""")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """simple docstring"""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """simple docstring"""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("""The input value of 'num_rows' should be 'int'""")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup="""import __main__""")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'''{call:38} -- {timing:.4f} seconds''')

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
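# Worked example: generate_pascal_triangle(4) returns
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]. The optimized variant computes only
# the first half of each row and mirrors it, so both functions agree for every
# num_rows while doing roughly half the additions.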
| 470 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl
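
# Added sketch (assumed semantics, independent of the accelerate internals):
# with even_batches=True the sampler wraps indices around so every process sees
# the same number of batches; with even_batches=False the tail process simply
# gets fewer samples.
def _shard(indices, num_processes, process_index, even_batches):
    shard = indices[process_index::num_processes]
    if even_batches and len(indices) % num_processes:
        target = -(-len(indices) // num_processes)  # ceil division
        while len(shard) < target:
            shard = shard + indices[: target - len(shard)]
    return shard


assert _shard(list(range(3)), 2, 1, even_batches=True) == [1, 0]
assert _shard(list(range(3)), 2, 1, even_batches=False) == [1]
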
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """A helper for verifying the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator: Accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
| 710 |
def capitalize_each_character(txt: str) -> list[str]:
    """Return every variant of `txt` with exactly one alphabetic character uppercased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
| 181 | 0 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy approximation of a minimum vertex cover using a max-heap of node degrees."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
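
# Added sanity check (not from the original file): in a two-node graph with a
# single edge, picking either endpoint covers everything.
assert greedy_min_vertex_cover({0: [1], 1: [0]}) == {0}
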
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 556 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 518 | 0 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str) -> None:
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
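
# Added standalone illustration (hypothetical keys, not a real checkpoint): the
# converter splits one flat state_dict into per-submodel dicts by prefix and
# strips the prefix so each sub-model can load its weights directly.
_sd = {"first_stage_model.w": 1, "model.diffusion_model.w": 2, "other": 3}
_vqvae_sd = {k[len("first_stage_model.") :]: v for k, v in _sd.items() if k.startswith("first_stage_model.")}
_unet_sd = {k[len("model.diffusion_model.") :]: v for k, v in _sd.items() if k.startswith("model.diffusion_model.")}
assert _vqvae_sd == {"w": 1} and _unet_sd == {"w": 2}
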
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 718 |
'''simple docstring'''
def get_bound(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Small helper: return `min_val` when `option` is truthy, otherwise `max_val`."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer midpoint of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Locate `to_guess` inside (lower, higher) by repeated bisection."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value"
        )

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Read bounds and the target value, then run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
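
# Added worked example (assumed values, not from the original file): the
# bisection path for lower=0, higher=1000, to_guess=355.
_lo, _hi, _target, _path = 0, 1000, 355, []
while True:
    _mid = int((_lo + _hi) / 2)
    _path.append(_mid)
    if _mid < _target:
        _lo = _mid
    elif _mid > _target:
        _hi = _mid
    else:
        break
assert _path == [500, 250, 375, 312, 343, 359, 351, 355]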
| 123 | 0 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    """Sort the list `a` in place using pigeonhole sort (integers only)."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))
if __name__ == "__main__":
main()
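
# Added worked illustration (not from the original file): for [8, 3, 2, 7, 4, 6, 8]
# the holes count occurrences of each value in the range 2..8.
_values = [8, 3, 2, 7, 4, 6, 8]
_holes = [0] * (max(_values) - min(_values) + 1)
for _v in _values:
    _holes[_v - min(_values)] += 1
assert _holes == [1, 1, 1, 0, 1, 1, 2]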
| 551 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 551 | 1 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
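
# Added illustration (example string is hypothetical): what the `# Copied from`
# regex above captures -- the indent, the object path, and the optional
# replacement pattern.
_m = _re_copy_warning.search(
    "    # Copied from diffusers.models.unet_2d.UNet2DModel with UNet2D->MyNet"
)
assert _m.groups() == ("    ", "models.unet_2d.UNet2DModel", "with UNet2D->MyNet")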
| 595 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
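
# Added sanity check (derived from the tester defaults above): the sequence
# length is num_frames * (image_size // patch_size) ** 2 patches plus one CLS token.
_image_size, _patch_size, _num_frames = 10, 2, 2
_num_patches_per_frame = (_image_size // _patch_size) ** 2
_seq_length = _num_frames * _num_patches_per_frame + 1
assert (_num_patches_per_frame, _seq_length) == (25, 51)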
| 595 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/facebook/data2vec-text-base/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
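
# Added illustration (plain Python, mirrors the mapping built by `inputs` above
# for the default, non multiple-choice case):
_dynamic_axis = {0: "batch", 1: "sequence"}
_expected = OrderedDict([("input_ids", _dynamic_axis), ("attention_mask", _dynamic_axis)])
assert list(_expected.keys()) == ["input_ids", "attention_mask"]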
| 5 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    r"""Constructs a MobileViT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
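
# Added numpy illustration (not part of the processor): the RGB -> BGR flip
# performed by `flip_channel_order` on a channels-first array.
_img = np.arange(12).reshape(3, 2, 2)  # (channels, height, width)
_flipped = _img[::-1, ...]
assert (_flipped[0] == _img[2]).all() and (_flipped[2] == _img[0]).all()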
| 595 | 0 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
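
# Added toy illustration (standalone, assumed semantics matching the loop
# above): PABEE exits once the per-layer prediction stays unchanged for
# `patience` consecutive layers.
def _pabee_exit_layer(layer_predictions, patience):
    counter, previous = 0, None
    for i, pred in enumerate(layer_predictions):
        counter = counter + 1 if pred == previous else 0
        previous = pred
        if counter == patience:
            return i
    return len(layer_predictions) - 1


assert _pabee_exit_layer([0, 1, 1, 1, 2], patience=2) == 3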
| 680 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the surface distance between two points on the WGS-84 ellipsoid."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
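
# Added numeric sanity check (WGS-84 values from the constants above):
_flattening = (AXIS_A - AXIS_B) / AXIS_A
assert abs(_flattening - 1 / 298.257223563) < 1e-9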
| 680 | 1 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)

# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 97 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
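# A minimal usage sketch (checkpoint name taken from the map above):
# tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/barthez')
# tokenizer('Le camembert est délicieux !')['input_ids']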
| 97 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self) -> int:
        """simple docstring"""
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        """simple docstring"""
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.")

    @property
    def num_blocks(self) -> int:
        """simple docstring"""
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        """simple docstring"""
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
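# A minimal usage sketch (values are illustrative):
# config = FunnelConfig(block_sizes=[2, 2], num_decoder_layers=1)
# config.num_hidden_layers  # 4, i.e. sum(block_sizes)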
| 157 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        """simple docstring"""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
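# A minimal token2json sketch (tag names are illustrative, not from the original):
# processor.token2json("<s_menu><s_nm>Latte</s_nm><s_cnt>2</s_cnt></s_menu>")
# -> {"menu": {"nm": "Latte", "cnt": "2"}}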
| 157 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        '''simple docstring'''
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids')
            encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask')
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        '''simple docstring'''
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
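# A minimal usage sketch (checkpoint name is illustrative, not from the original):
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")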
| 93 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    '''simple docstring'''
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
| 455 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
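# A minimal usage sketch (values are illustrative, not from the original):
# config = DeiTConfig(image_size=384, patch_size=16)
# DeiTOnnxConfig(config).atol_for_validation  # 1e-4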
| 42 |
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

        maximum = 0

        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp

        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp

        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp

        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
if __name__ == "__main__":
print(solution())
| 42 | 1 |
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
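# A quick illustrative check (grid taken from `test_grids` above):
# count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])  # 8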
def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 661 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    '''simple docstring'''
    infos: dict


class ServeTokenizeResult(BaseModel):
    '''simple docstring'''
    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    '''simple docstring'''
    text: str


class ServeForwardResult(BaseModel):
    '''simple docstring'''
    output: Any


class ServeCommand(BaseTransformersCLICommand):
    '''simple docstring'''
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints.")
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on", )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately.")
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"], ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"], ),
                ], timeout=600, )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(self, tokens_ids: List[int] = Body(None, embed=True), skip_special_tokens: bool = Body(False, embed=True), cleanup_tokenization_spaces: bool = Body(True, embed=True), ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
| 661 | 1 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """simple docstring"""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """simple docstring"""
    current_block = rename_keys(current_block)
    new_current_block = {}
    # convert the "/"-separated flax keys to the "."-separated keys torch expects
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """simple docstring"""
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key_tuple)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto")

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 442 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b  # protobuf equality with the names blanked out
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
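# A minimal usage sketch (path is illustrative, not from the original):
# optimized_path = remove_dup_initializers("/tmp/model.onnx")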
| 442 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        '''simple docstring'''
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
    @slow
    def test_training_step_equality(self):
        '''simple docstring'''
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True, )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True, )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 306 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        '''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model

    @property
    def dummy_vae(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        '''simple docstring'''
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__: int = self.dummy_cond_unet_upscale
lowerCamelCase__: Dict = DDPMScheduler()
lowerCamelCase__: Union[str, Any] = DDIMScheduler(prediction_type="""v_prediction""" )
lowerCamelCase__: List[str] = self.dummy_vae
lowerCamelCase__: Tuple = self.dummy_text_encoder
lowerCamelCase__: int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase__: Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__: Union[str, Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowerCamelCase__: Optional[int] = unet.half()
lowerCamelCase__: Optional[Any] = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase__: List[str] = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
lowerCamelCase__: List[Any] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase__: Tuple = """A painting of a squirrel eating a burger"""
lowerCamelCase__: Optional[int] = torch.manual_seed(0 )
lowerCamelCase__: Optional[Any] = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="""np""" , ).images
lowerCamelCase__: Optional[int] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        '''simple docstring'''
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy")

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        '''simple docstring'''
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy")

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 306 | 1 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """simple docstring"""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort `arr` using one process per element, communicating over pipes."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
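# e.g. odd_even_transposition([3, 1, 2]) -> [1, 2, 3]. Note the worker loop above is
# hard-coded to 10 rounds, so only lists of length <= 10 are guaranteed to be sorted.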
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 713 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
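# Minimal usage sketch (an illustrative assumption, not part of the original module;
# the checkpoint name is a placeholder):
#
#   from transformers import AutoConfig
#   enc = AutoConfig.from_pretrained("bert-base-uncased")
#   dec = AutoConfig.from_pretrained("bert-base-uncased")
#   cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention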
| 183 | 0 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # start from random noise shaped like a single UNet sample
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # intentionally returns a tensor of ones (a fixture-style dummy output)
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
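# Hedged usage sketch (the class name above is a best-effort reconstruction of the
# obfuscated original, and `some_unet`/`some_scheduler` are placeholders):
#
#   pipe = CustomLocalPipeline(unet=some_unet, scheduler=some_scheduler)
#   ones = pipe()  # a tensor of ones with the shape of one UNet sample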
| 510 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
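# Floyd-Warshall runs in O(n^3) time and O(n^2) space; after graph.floyd_warshall()
# every dp[u][v] holds the shortest-path distance from u to v (math.inf if v is
# unreachable from u).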
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 17 | 0 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given length (Project Euler 145 helper)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result
def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
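# Sanity check, stated as an assumption about the published Project Euler 145
# result: solution(9) -- reversible numbers below 10**9 -- should return 608720.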
if __name__ == "__main__":
print(F'''{solution() = }''')
| 705 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single Donut processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # for backward compatibility with `as_target_processor`
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        # forwards everything to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwards everything to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON-like dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
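    # e.g. token2json("<s_menu><s_name>Latte</s_name></s_menu>") returns
    # {"menu": {"name": "Latte"}}: nested tags become nested dicts, and
    # "<sep/>"-separated leaves become lists.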
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 458 | 0 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
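# e.g. climb_stairs(3) == 3 and climb_stairs(4) == 5; the counts grow like the
# Fibonacci sequence.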
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
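# A hedged sketch of what this builds (version numbers below are illustrative
# assumptions only):
#   "diffusers/0.15.0; python/3.10.6; session_id/4f2c...; torch/2.0.0"
# with "; telemetry/off" appended instead when telemetry is disabled.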
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.' )

    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en',
            license='apache-2.0',
            library_name='diffusers',
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
            'existing cached models. This is a one-time operation, you can interrupt it or run it '
            'later by calling `diffusers.utils.hub_utils.move_cache()`.'
        )
        try:
            move_cache()
        except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)
    return weights_name
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
return model_file
except: # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
                'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
                'this model name. Check the model page at '
                f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
                ' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
                '\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 0 |
def binary_or(a: int, b: int):
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
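# e.g. binary_or(25, 32) == "0b111001"  (0b011001 | 0b100000 -> 0b111001)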
if __name__ == "__main__":
import doctest
doctest.testmod()
| 583 |
from __future__ import annotations
def pigeon_sort(array):
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
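# e.g. pigeon_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]; runs in O(n + value_range)
# time but needs O(value_range) extra space.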
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 583 | 1 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48_000,
'sample_size': 131_072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
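# The two helpers above follow the v-diffusion convention: sigma = sin(t*pi/2)**2,
# alpha = sqrt(1 - sigma**2), and alpha_sigma_to_t maps the pair back to a timestep
# via atan2(sigma, alpha) * 2 / pi, so values stay in [0, 1].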
class Object(object):
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]['''url''']
    os.system(F'wget {url} ./')
    return F'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith('''skip'''):
        return name.replace('''skip''', RES_CONV_MAP['''skip'''])

    # name has to be of format main.{digit}
    if not name.startswith('''main.'''):
        raise ValueError(F'ResConvBlock error with {name}')
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
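# e.g. convert_resconv_naming("main.0.weight") -> "conv_1.weight" and
# convert_resconv_naming("skip.weight") -> "conv_skip.weight", per RES_CONV_MAP above.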
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(F'Attn error with {name}')
def rename(input_string, max_depth=13):
    string = input_string

    if string.split('''.''')[0] == "timestep_embed":
        return string.replace('''timestep_embed''', '''time_proj''')

    depth = 0
    if string.startswith('''net.3.'''):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.'''):
        string = string[4:]

    while string.startswith('''main.7.'''):
        depth += 1
        string = string[7:]

    if string.startswith('''main.'''):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'down_blocks.{depth}'
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'up_blocks.{max_depth - 1}' if int(layer_num) > 3 else '''down_blocks.0'''

    if not string_left.startswith('''.'''):
        raise ValueError(F'Naming error with {input_string} and string_left: {string_left}.')

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + '''.''' + new_layer + '''.''' + string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel'''):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')

    model_name = args.model_path.split('''/''')[-1].split('''.''')[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), F'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]['''sample_rate''']
    sample_size = MODELS_MAP[model_name]['''sample_size''']

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)['''state_dict'''])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, F'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith('''kernel''') for k in list(diffusers_minus_renamed)), F'Problem with {diffusers_minus_renamed}'

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print('''Diff sum''', diff_sum)
    print('''Diff max''', diff_max)

    assert diff_max < 1E-3, F'Diff max: {diff_max} is too much :-/'
    print(F'Conversion for {model_name} successful!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args) | 322 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """Builder config for datasets built from a Spark DataFrame."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select('''*''', pyspark.sql.functions.spark_partition_id().alias('''part_id'''))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('''*''').where(F'part_id = {partition_id}').drop('''part_id''')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield F'{partition_id}_{row_id}', row.asDict()
                row_id += 1

    return generate_fn
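# Keys yielded by generate_fn have the form f"{partition_id}_{row_id}", which keeps
# example keys unique across partitions as long as each partition appears exactly
# once in partition_order.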
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__( self : List[Any] ):
"""simple docstring"""
yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, '''fs_test''' + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, '''a''')
            return [probe_file]

        if self._spark.conf.get('''spark.master''', '''''').startswith('''local'''):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 1_00 else 1_00
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, '''batch_bytes: long''')
            .agg(pyspark.sql.functions.sum('''batch_bytes''').alias('''sample_bytes'''))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == '''parquet''' else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == '''parquet'''

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=['''task_id''', '''num_examples''', '''num_bytes'''],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace('''SSSSS''', F'{shard_id:05d}').replace('''TTTTT''', F'{task_id:05d}'),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=['''task_id''', '''num_examples''', '''num_bytes'''],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace('''SSSSS''', F'{shard_id:05d}').replace('''TTTTT''', F'{task_id:05d}'),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=['''task_id''', '''num_examples''', '''num_bytes'''],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, '''task_id: long, num_examples: long, num_bytes: long''')
            .groupBy('''task_id''')
            .agg(
                pyspark.sql.functions.sum('''num_examples''').alias('''total_num_examples'''),
                pyspark.sql.functions.sum('''num_bytes''').alias('''total_num_bytes'''),
                pyspark.sql.functions.count('''num_bytes''').alias('''num_shards'''),
                pyspark.sql.functions.collect_list('''num_examples''').alias('''shard_lengths'''),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = '''-TTTTT-SSSSS-of-NNNNN'''
        fname = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(F'Renaming {total_shards} shards.')

        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace('''SSSSS''', F'{shard_id:05d}').replace('''TTTTT''', F'{task_id:05d}'),
                    fpath.replace('''TTTTT-SSSSS''', F'{global_shard_id:05d}').replace('''NNNNN''', F'{total_shards:05d}'),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('''SSSSS''', F'{shard_id:05d}').replace('''TTTTT''', F'{task_id:05d}'),
                fpath.replace(SUFFIX, ''''''),
            )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df) | 322 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 720 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip('''/''')  # Remove leading/trailing whitespace & slashes
    if new_olid.count('''/''') != 1:
        msg = f"""{olid} is not a valid Open Library olid"""
        raise ValueError(msg)
    return requests.get(f"""https://openlibrary.org/{new_olid}.json""").json()
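# e.g. get_openlibrary_data("isbn/0140328726") fetches the JSON record for that
# ISBN (Roald Dahl's "Matilda" in the Open Library catalogue).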
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        '''title''': '''Title''',
        '''publish_date''': '''Publish date''',
        '''authors''': '''Authors''',
        '''number_of_pages''': '''Number of pages:''',
        '''first_sentence''': '''First sentence''',
        '''isbn_10''': '''ISBN (10)''',
        '''isbn_13''': '''ISBN (13)''',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['''Authors'''] = [
        get_openlibrary_data(author['''key'''])['''name'''] for author in data['''Authors''']
    ]
    data['''First sentence'''] = data['''First sentence''']['''value''']
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ''', '''.join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
| 690 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
    'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
    'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
    'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
    'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
    'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
    'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
    'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
    'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
    'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=5_0277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1E-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 248 | import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
__UpperCamelCase : List[str] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
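# Example invocation (hedged sketch; the script name and file paths below are
# hypothetical, only the flags come from the parser above):
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path ldm_config.yaml --output_path ./ldm_pipeline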
| 248 | 1 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Set the seed in `random`, `numpy` and `torch` for reproducible behavior."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """
        Compute the decay factor for the exponential moving average.
        """
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post renaming of basic JAX keys to pytorch."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block

    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
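def _shard_math_example():
    # Hedged sketch (added for illustration, not part of the original script):
    # the byte accounting that `shard_on_the_fly` relies on.
    ten_gb = convert_file_size_to_int("10GB")  # 10 * 10**9 bytes
    fp32_bytes = dtype_byte_size(torch.float32)  # 4.0 bytes per element
    assert ten_gb / fp32_bytes == 2.5e9  # fp32 parameters that fit in one 10GB shard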
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """Sanity check: convert a small Switch checkpoint and run one generation."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 11 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
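def _custom_timesteps_example():
    # Hedged sketch (added for illustration, not part of the original tests):
    # passing explicit `timesteps` replaces `num_inference_steps` in the sampler.
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
    scheduler.set_timesteps(timesteps=[22, 0])
    assert scheduler.timesteps.tolist() == [22, 0]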
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 539 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
_lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def _snake_case ( ):
A = os.path.dirname(os.path.realpath(snake_case__ ) )
A = os.path.join(snake_case__ , 'words.txt' )
A = ''
with open(snake_case__ ) as f:
A = f.readline()
A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
A = [
word
for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(snake_case__ )
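def _word_value_example():
    # Hedged sketch (added for illustration): letter values come from ord(x) - 64,
    # so "SKY" -> 19 + 11 + 25 = 55 = t(10), a triangle word.
    assert sum(ord(x) - 64 for x in "SKY") == 55
    assert 55 in TRIANGULAR_NUMBERS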
if __name__ == "__main__":
print(solution()) | 91 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
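def _attribute_map_example():
    # Hedged sketch (added for illustration, not part of the original module):
    # the attribute_map above aliases the generic names onto the GPT-style fields.
    cfg = TrajectoryTransformerConfig()
    assert cfg.hidden_size == cfg.n_embd == 128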
| 705 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id,
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
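def _layoutlmv3_config_example():
    # Hedged sketch (added for illustration, not part of the original module):
    # the 2D-layout defaults round-trip through the config.
    cfg = LayoutLMv3Config()
    assert cfg.max_2d_position_embeddings == 1024 and cfg.patch_size == 16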
| 155 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
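def _label_mask_example(pad_token_id: int = 0):
    # Hedged sketch (added for illustration, not part of the original test): the
    # -100 substitution above is what hides padded positions from the loss.
    labels = [7, 8, pad_token_id, pad_token_id]
    masked = [-100 if token == pad_token_id else token for token in labels]
    assert masked == [7, 8, -100, -100]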
| 21 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
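if __name__ == "__main__":
    # Hedged sketch (added for illustration, not part of the original module):
    # the shim warns once, then behaves exactly like YolosImageProcessor.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        extractor = YolosFeatureExtractor()
    assert isinstance(extractor, YolosImageProcessor)
    assert any(issubclass(w.category, FutureWarning) for w in caught)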
| 692 | 0 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
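def _output_shape_example():
    # Hedged sketch (added for illustration, not part of the original file):
    # concrete test classes set block_type (and block_class); the mixin derives
    # the expected output shape from block_type alone.
    class _Down(UNetBlockTesterMixin):
        block_type = "down"

    assert _Down().output_shape == (4, 32, 16, 16)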
| 714 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
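if __name__ == "__main__":
    # Hedged sketch (added for illustration, not part of the original module):
    # compose a full Pix2Struct config from its two sub-configs.
    cfg = Pix2StructConfig.from_text_vision_configs(Pix2StructTextConfig(), Pix2StructVisionConfig())
    assert cfg.to_dict()["model_type"] == "pix2struct"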
| 329 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
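if __name__ == "__main__":
    # Hedged sketch (added for illustration, not part of the original module):
    # the converters turn menu choices into typed enum values.
    assert _convert_yes_no_to_bool("YES") is True
    assert _convert_mixed_precision(1) == PrecisionType.FP16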
| 7 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
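def _sort_example():
    # Hedged sketch (added for illustration): the recursion sorts a list in place.
    data = [5, 3, 1, 4, 2]
    rec_insertion_sort(data, len(data))
    assert data == [1, 2, 3, 4, 5]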
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 49 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
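def _resize_example():
    # Hedged sketch (added for illustration, not part of the original module): a
    # 480x640 image resized toward 384x384 with keep_aspect_ratio=True scales by
    # the height factor (0.8) and rounds both sides to a multiple of 32.
    image = np.zeros((3, 480, 640), dtype=np.float32)
    out = get_resize_output_image_size(image, output_size=384, keep_aspect_ratio=True, multiple=32)
    assert out == (384, 512)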
class UpperCamelCase_ ( A ):
'''simple docstring'''
a :Optional[Any] = ['pixel_values']
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = False , _UpperCAmelCase = 1 , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase)
lowerCAmelCase_ = size if size is not None else {'''height''': 384, '''width''': 384}
lowerCAmelCase_ = get_size_dict(_UpperCAmelCase)
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of
lowerCAmelCase_ = resample
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = 1 , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ):
lowerCAmelCase_ = get_size_dict(_UpperCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}')
lowerCAmelCase_ = get_resize_output_image_size(
_UpperCAmelCase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_UpperCAmelCase , multiple=_UpperCAmelCase , )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
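# Minimal usage sketch (image path illustrative; class name reconstructed from the
# DPT-style options above):
#
#   from PIL import Image
#   processor = DPTImageProcessor(size={"height": 384, "width": 384})
#   batch = processor(images=Image.open("example.jpg"), return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 384, 384])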
| 413 |
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of 1..n."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
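# Sanity check of the closed forms: for n = 10 the square of the sum is 55**2 = 3025
# and the sum of the squares is 385, so solution(10) == 2640.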
| 413 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
    'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 484 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    """Configuration class for ESM models."""

    model_type = "esm"
    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')
    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm_head: bool = False
    lm_head_hidden_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 vocabulary."""
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 484 | 1 |
"""simple docstring"""
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Serialize prints from concurrent ranks via an exclusive lock on this file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
| 708 |
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )
    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 518 | 0 |
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Adjust the contrast of a PIL image by `level` (positive increases contrast)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
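# Quick identity check on the contrast factor: at level = 0 the factor is
# (259 * 255) / (255 * 259) = 1, so change_contrast(img, 0) leaves pixels unchanged.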
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("image_data/lena_high_contrast.png", format="png")
| 430 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
| 20 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct the config
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)
    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 704 |
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of `number` ends in the digits of `number` itself."""
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 186 | 0 |
def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
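# Worked example: 28 = 2**2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6,
# matching the divisors {1, 2, 4, 7, 14, 28}.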
def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 272 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Estimate the largest eigenvalue (and its eigenvector) of a square matrix."""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
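# Minimal usage sketch (values illustrative): for the symmetric matrix diag(2, 1)
# and a start vector with components along both axes, the iteration converges to
# the dominant eigenvalue:
#
#   power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))
#   # -> (~2.0, vector close to [1.0, 0.0])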
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 193 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    """Build the example tree with root 1, children 2 and 3, and grandchildren 4 and 5."""
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.left.right = Node(5)
    return root


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> list[int]:
    output: list[int] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[int]:
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[int]:
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> list[list[int]]:
    """Alternate left-to-right and right-to-left traversal level by level."""
    if root is None:
        return []
    output: list[list[int]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 706 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """Factory used by the argparse subcommand to build a DownloadCommand."""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models'
        )
        download_parser.add_argument(
            '--force', action='store_true', help='Force the model to be download even if already in cache-dir'
        )
        download_parser.add_argument(
            '--trust-remote-code',
            action='store_true',
            help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine',
        )
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
| 624 | 0 |
"""simple docstring"""
def triangle_number_generator():
    """Yield triangle numbers n * (n + 1) / 2 for n = 1, 2, 3, ..."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
    print(solution())
| 91 |
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number via the multiplicative recurrence."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
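# Worked example of the recurrence C_i = C_{i-1} * (4i - 2) // (i + 1):
# catalan(n) for n = 1..5 yields 1, 1, 2, 5, 14, the first Catalan numbers.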
if __name__ == "__main__":
import doctest
doctest.testmod()
| 428 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 109 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
| 109 | 1 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. 'AB') to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
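# Worked example: for "AB", 'B' contributes (66 - 64) * 26**0 = 2 and 'A' contributes
# (65 - 64) * 26**1 = 26, so excel_title_to_column("AB") == 28.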
if __name__ == "__main__":
from doctest import testmod
testmod()
| 467 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!'
                    )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
| 42 | 0 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond (a pyramid of stars)."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond (an inverted pyramid of stars)."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a hint when n is not positive."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
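# Example: pretty_print(3) prints rows of 1, 2, 3 stars (upper half) followed by
# rows of 3, 2, 1 stars (lower half), forming a 2n-row star diamond.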
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print('Good Bye...')
| 458 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timesformer'] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 458 | 1 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Count ways to replace grey tiles in a row with coloured tiles of length 2, 3 or 4."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
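# Hand check: in a row of length 3 a length-2 tile fits in 2 positions, a length-3
# tile in 1, and a length-4 tile in none, so solution(3) == 3.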
if __name__ == "__main__":
print(f"""{solution() = }""")
| 620 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 620 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'clusters'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'image_processor.json')
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip('ImageGPT requires clusters at initialization')
    def test_init_without_params(self):
        pass
def prepare_images():
    """Load two small test images from the fixtures dataset."""
    dataset = load_dataset('hf-internal-testing/fixtures_image_utils', split='test')
    image1 = Image.open(dataset[4]['file'])
    image2 = Image.open(dataset[5]['file'])
    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors='pt')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)
        # test batched
        encoding = image_processing(images, return_tensors='pt')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 443 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    """Entry point for the `diffusers-cli` command."""
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
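
# Usage sketch (illustrative, not part of the original module): with the
# console script installed, `diffusers-cli env` dispatches to the registered
# EnvironmentCommand, which prints environment/debug information.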
if __name__ == "__main__":
main()
| 443 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
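
# Illustrative sanity check (assumed values, not part of the original module):
# betas = betas_for_alpha_bar(1000)
# assert betas.shape == (1000,)
# assert float(betas.max()) <= 0.999  # capped by max_beta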
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """A modified DDPM scheduler used in the UnCLIP pipeline."""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No input scaling is needed for this scheduler; returns the sample unchanged."""
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps used for the diffusion chain (run before inference)."""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        """Predict the sample at the previous timestep by reversing the SDE."""
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance  # _get_variance already returned the standard deviation in this mode
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """Diffuse the original samples forward to the given timesteps."""
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
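
# Minimal usage sketch (illustrative shapes, not part of the original module):
# run one reverse-diffusion step with a dummy "model output".
# scheduler = UnCLIPScheduler()
# scheduler.set_timesteps(25)
# sample = torch.randn(1, 3, 64, 64)
# model_output = torch.randn(1, 3, 64, 64)
# out = scheduler.step(model_output, int(scheduler.timesteps[0]), sample)
# print(out.prev_sample.shape)  # torch.Size([1, 3, 64, 64])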
| 537 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
        """Declare the metric's feature schema, citation, and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Compute corpus-level BLEU using the TensorFlow NMT reference implementation."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 537 | 1 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    """Warn about (and collect replacement values for) deprecated arguments or attributes."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number - 1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
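
# Usage sketch (illustrative; the argument names are hypothetical): a function
# that renamed a keyword can pop the old value from **kwargs with a warning.
# def resize(image, *, size=None, **kwargs):
#     old = deprecate('old_size', '1.0.0', 'Use `size` instead.', take_from=kwargs)
#     size = size if size is not None else old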
| 707 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()

        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components['text_encoder'] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['prompt'] = 'A photo of an astronaut'
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)

        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components['text_encoder'] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion', safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type='np'
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained('BAAI/AltDiffusion', subfolder='scheduler')

        alt_pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion', scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type='numpy')

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 472 | 0 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure qubit 0 on a simple circuit and return the counts histogram."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 77 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler in the style of Karras et al. for variance-expanding models."""

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        """Set the timesteps and the sigma schedule used for the diffusion chain."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )
    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ):
        """Explicit Langevin-like "churn" step: add noise to the sample, lifting sigma to sigma_hat."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat
    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Predict the sample at the previous timestep (Euler step of the reverse SDE)."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Correct the predicted sample based on a second model evaluation (Heun correction)."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
| 578 | 0 |
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    """Return the minimum path sum from top-left to bottom-right, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError('The grid does not contain the appropriate information')

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
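
# Worked example (illustrative): for the grid below, the cheapest path is
# 1 -> 3 -> 1 -> 1 -> 1, so min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
# returns 7. Note that the grid is modified in place.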
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 716 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the number of integer right-triangle solutions."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1_000) -> int:
    """Return the perimeter <= max_perimeter with the most right-triangle solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
    print(f'Perimeter {solution()} has maximum solutions')
| 628 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton for multi-keyword string matching."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]['next_states']:
            if char == self.adlist[state]['value']:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    }
                )
                self.adlist[current_state]['next_states'].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]['output'].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]['next_states']:
            q.append(node)
            self.adlist[node]['fail_state'] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]['next_states']:
                q.append(child)
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state, self.adlist[child]['value']) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]['fail_state'] = self.find_next_state(
                    state, self.adlist[child]['value']
                )
                if self.adlist[child]['fail_state'] is None:
                    self.adlist[child]['fail_state'] = 0
                self.adlist[child]['output'] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]['output']:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
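
# Usage sketch (illustrative): build the automaton once, then find every
# keyword occurrence in a single pass over the text.
# automaton = Automaton(['what', 'hat', 'ver', 'er'])
# print(automaton.search_in('whatever, err ... , wherever'))
# # {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}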
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Sort a copy of input_list with bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
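
# Example (illustrative): iter_merge_sort([4, 1, 3, 2]) returns [1, 2, 3, 4];
# the input is copied up front, so the caller's list is not mutated.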
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
| 158 | 0 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
CHECKPOINT = 'naver-clova-ix/donut-base'


class DonutProcessorTest(unittest.TestCase):
    def setUp(self) -> None:
        self.processor = DonutProcessor.from_pretrained(CHECKPOINT)

    def test_token2json(self) -> None:
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )

        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 654 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = 'AAPL') -> str:
    """Scrape the current price of `symbol` from Yahoo Finance India."""
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    """Configuration class to store the configuration of a REALM model."""

    model_type = 'realm'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3_072,
        hidden_act='gelu_new',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13_353_718,
        searcher_beam_size=5_000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
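
# Usage sketch (illustrative): any field can be overridden at construction
# time; unspecified fields keep the defaults above.
# config = RealmConfig(num_candidates=4)
# print(config.retriever_proj_size)  # 128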
| 67 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_nllb_moe': [
        'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'NllbMoeConfig',
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nllb_moe'] = [
        'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'NllbMoeForConditionalGeneration',
        'NllbMoeModel',
        'NllbMoePreTrainedModel',
        'NllbMoeTop2Router',
        'NllbMoeSparseMLP',
    ]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 67 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes='base', keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self) -> None:
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes='base', keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ['__java__', '__python__', '__en_XX__', '<mask>'])

        code = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
    def test_full_multi_tokenizer(self) -> None:
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes='multi', keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens,
            ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'],
        )

        code = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    tgt_text = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    expected_src_tokens = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls) -> None:
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes='base', src_lang='python', tgt_lang='en_XX'
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self) -> None:
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self) -> None:
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self) -> None:
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self) -> None:
        src_text = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self) -> None:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__']), [50004, 50001])

    def test_special_tokens_unaffected_by_save_load(self) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self) -> None:
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self) -> None:
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors='pt',
        )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self) -> None:
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt'
        )
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self) -> None:
        inputs = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='java'
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                'input_ids': [[150, 242, 2, 50003]],
                'attention_mask': [[1, 1, 1, 1]],
                # java
                'forced_bos_token_id': 50001,
            },
        )
| 538 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
        'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VanForImageClassification',
        'VanModel',
        'VanPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 538 | 1 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
| 519 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    """Build a tiny in-memory dataset with two near-duplicate files."""
    data_dict = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self) -> None:
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self) -> None:
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]['copies'], 2)
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'], True)
| 519 | 1 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial-division primality test; sufficient for the small candidates generated below."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are the difference of two consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # (1 + 1)**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
    print(f'{solution() = }')
| 716 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Contains the full pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67_45_23_01, 0xEF_CD_AB_89, 0x98_BA_DC_FE, 0x10_32_54_76, 0xC3_D2_E1_F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFF_FF_FF_FF

    def padding(self):
        """Pad the message to a multiple of 64 bytes, appending the bit length."""
        padding = b'\x80' + b'\x00' * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack('>Q', 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded data into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into eighty 32-bit words."""
        w = list(struct.unpack('>16L', block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the compression function over all blocks and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A_82_79_99
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6E_D9_EB_A1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F_1B_BC_DC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA_62_C1_D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFF_FF_FF_FF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFF_FF_FF_FF,
                self.h[1] + b & 0xFF_FF_FF_FF,
                self.h[2] + c & 0xFF_FF_FF_FF,
                self.h[3] + d & 0xFF_FF_FF_FF,
                self.h[4] + e & 0xFF_FF_FF_FF,
            )
        return ('{:08x}' * 5).format(*self.h)
def test_sha1_hash():
    msg = b'Test String'
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description='Process some strings or files')
    parser.add_argument(
        '--string',
        dest='input_string',
        default='Hello World!! Welcome to Cryptography',
        help='Hash the string',
    )
    parser.add_argument('--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod() | 641 | 0 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
        - 'matthews_correlation': Matthews correlation coefficient
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute exact match and per-question/overall F1 for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
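
# Illustrative call (hypothetical inputs): two answers to the same question,
# both predicted correctly, yield perfect scores.
#     ids_preds = [
#         {"idx": {"paragraph": 0, "question": 0}, "prediction": 0},
#         {"idx": {"paragraph": 0, "question": 0}, "prediction": 1},
#     ]
#     evaluate_multirc(ids_preds, labels=[0, 1])
#     # -> {"exact_match": 1.0, "f1_m": 1.0, "f1_a": 1.0}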
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 203 |
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text``: pair each character code with a fresh random key."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            # c = (i + k) * k; decrypt() inverts this as i = (c - k**2) / k
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt ``cipher`` using the matching ``key`` stream."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
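
# Note: despite the name, this is not a true one-time pad -- the keys come from
# `random` (not cryptographically secure) and c = (i + k) * k leaks magnitude
# information about the plaintext. Treat it as a toy cipher for demonstration.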
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
| 203 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 522 |
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    # Build an output projection that shares the embedding weights
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
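
# Example invocation (script name and paths are illustrative placeholders):
#   python convert_m2m100_original_checkpoint_to_pytorch.py model.pt ./m2m100-converted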
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 522 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 672 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
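
# Note: the standalone check above mirrors Autoformer's series decomposition --
# the encoder consumes the context values concatenated with features, while the
# decoder receives a seasonal part (zero-padded) plus a trend part padded with
# the context mean.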
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 205 | 0 |
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be an int or castable to int.")

    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
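
# Worked example (assuming the helpers above):
#     bwt_transform("^BANANA")
#     # -> {"bwt_string": "BNN^AAA", "idx_original_string": 6}
#     reverse_bwt("BNN^AAA", 6)
#     # -> "^BANANA"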
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 703 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim,
        num_attention_heads,
        attention_head_dim,
        dropout=0.0,
        cross_attention_dim=None,
        activation_fn="geglu",
        num_embeds_ada_norm=None,
        attention_bias=False,
        only_cross_attention=False,
        double_self_attention=False,
        upcast_attention=False,
        norm_elementwise_affine=True,
        norm_type="layer_norm",
        final_dropout=False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size, dim):
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be"
                    f" divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when"
                    " calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
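
# Minimal usage sketch (shapes are illustrative): with dim=32 and 4 heads of
# size 8, the block maps (batch, seq, dim) -> (batch, seq, dim).
#     block = BasicTransformerBlock(dim=32, num_attention_heads=4, attention_head_dim=8)
#     out = block(torch.randn(2, 16, 32))  # torch.Size([2, 16, 32])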
class FeedForward(nn.Module):
    def __init__(
        self,
        dim,
        dim_out=None,
        mult=4,
        dropout=0.0,
        activation_fn="geglu",
        final_dropout=False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation function with an optional tanh approximation."""

    def __init__(self, dim_in, dim_out, approximate="none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A variant of the gated linear unit activation function."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""Approximate GELU: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Adaptive layer norm zero (adaLN-Zero) conditioned on timestep and class labels."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 692 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
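
# Illustrative round trip (hypothetical 3-letter key "god"): XOR-ing twice with
# the same cycled key recovers the plaintext, and every decoded byte stays in
# VALID_INTS, so try_key accepts it.
#     key = (ord("g"), ord("o"), ord("d"))
#     ciphertext = [ord(c) ^ k for c, k in zip("hello", cycle(key))]
#     try_key(ciphertext, key)  # -> "hello"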
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'{solution() = }')
| 572 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)

        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 590 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f'The tokenizer already contains the token {placeholder_token}. Please pass a different'
                ' `placeholder_token` that is not already in the tokenizer.')

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f'_{i}'
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f'The tokenizer already has placeholder token {token} that can get confused with'
                    f' {placeholder_token}. Please keep placeholder tokens independent.')
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, ' '.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs)

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs)
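# Usage sketch (illustrative, not part of the original file): register a multi-vector
# placeholder token and tokenize with it. The checkpoint name is an assumption; any
# CLIP tokenizer repo works.
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)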
| 705 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """Check `is_small_dataset` against the configured IN_MEMORY_MAX_SIZE."""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, 'IN_MEMORY_MAX_SIZE', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
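# For reference, `is_small_dataset` simply compares the dataset's byte size against
# `datasets.config.IN_MEMORY_MAX_SIZE`: with IN_MEMORY_MAX_SIZE = 500 * 2**20, a
# 400 MiB dataset is "small" (True) while a 600 MiB one is not (False). Illustrative
# values, mirroring the parametrization above.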
| 685 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 517 |
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
SCREAMING_SNAKE_CASE_ = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def lowerCamelCase__ ( a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__=False , ) -> Optional[Any]:
"""simple docstring"""
output_path.parent.mkdir(parents=a__ , exist_ok=a__)
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
a__ , a__ , f=output_path.as_posix() , input_names=a__ , output_names=a__ , dynamic_axes=a__ , do_constant_folding=a__ , use_external_data_format=a__ , enable_onnx_checker=a__ , opset_version=a__ , )
else:
export(
a__ , a__ , f=output_path.as_posix() , input_names=a__ , output_names=a__ , dynamic_axes=a__ , do_constant_folding=a__ , opset_version=a__ , )
@torch.no_grad()
def lowerCamelCase__ ( a__ , a__ , a__ , a__ = False) -> List[str]:
"""simple docstring"""
_snake_case : Dict = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
_snake_case : Optional[Any] = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA')
else:
_snake_case : List[str] = 'cpu'
_snake_case : Optional[Any] = StableDiffusionPipeline.from_pretrained(a__ , torch_dtype=a__).to(a__)
_snake_case : Tuple = Path(a__)
# TEXT ENCODER
_snake_case : List[str] = pipeline.text_encoder.config.max_position_embeddings
_snake_case : Any = pipeline.text_encoder.config.hidden_size
_snake_case : Union[str, Any] = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=a__ , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=a__ , dtype=torch.intaa)) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=a__ , )
del pipeline.text_encoder
# UNET
_snake_case : List[Any] = pipeline.unet.config.in_channels
_snake_case : List[Any] = pipeline.unet.config.sample_size
_snake_case : Dict = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , a__ , a__ , a__).to(device=a__ , dtype=a__),
torch.randn(2).to(device=a__ , dtype=a__),
torch.randn(2 , a__ , a__).to(device=a__ , dtype=a__),
False,
) , output_path=a__ , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=a__ , use_external_data_format=a__ , )
_snake_case : Optional[Any] = str(unet_path.absolute().as_posix())
_snake_case : int = os.path.dirname(a__)
_snake_case : int = onnx.load(a__)
# clean up existing tensor files
shutil.rmtree(a__)
os.mkdir(a__)
# collate external tensor files into one
onnx.save_model(
a__ , a__ , save_as_external_data=a__ , all_tensors_to_one_file=a__ , location='weights.pb' , convert_attribute=a__ , )
del pipeline.unet
# VAE ENCODER
_snake_case : Optional[int] = pipeline.vae
_snake_case : List[str] = vae_encoder.config.in_channels
_snake_case : str = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
_snake_case : str = lambda a__ , a__: vae_encoder.encode(a__ , a__)[0].sample()
onnx_export(
a__ , model_args=(
torch.randn(1 , a__ , a__ , a__).to(device=a__ , dtype=a__),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=a__ , )
# VAE DECODER
_snake_case : str = pipeline.vae
_snake_case : Tuple = vae_decoder.config.latent_channels
_snake_case : List[Any] = vae_decoder.config.out_channels
# forward only through the decoder part
    _snake_case : Optional[Any] = vae_decoder.decode
onnx_export(
a__ , model_args=(
torch.randn(1 , a__ , a__ , a__).to(device=a__ , dtype=a__),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=a__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
_snake_case : Optional[Any] = pipeline.safety_checker
_snake_case : List[str] = safety_checker.config.vision_config.num_channels
_snake_case : Tuple = safety_checker.config.vision_config.image_size
_snake_case : int = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , a__ , a__ , a__ , ).to(device=a__ , dtype=a__),
torch.randn(1 , a__ , a__ , a__).to(device=a__ , dtype=a__),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=a__ , )
del pipeline.safety_checker
_snake_case : Any = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker')
_snake_case : Dict = pipeline.feature_extractor
else:
_snake_case : List[Any] = None
_snake_case : int = None
_snake_case : Dict = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder') , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder') , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder') , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet') , scheduler=pipeline.scheduler , safety_checker=a__ , feature_extractor=a__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(a__)
print('ONNX pipeline saved to' , a__)
del pipeline
del onnx_pipeline
_snake_case : str = OnnxStableDiffusionPipeline.from_pretrained(a__ , provider='CPUExecutionProvider')
print('ONNX pipeline is loadable')
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 517 | 1 |
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
| 705 |
import logging
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'})
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to SortishSamler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'})
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'})
    attention_dropout: Optional[float] = field(
        default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'})
    lr_scheduler: Optional[str] = field(
        default='linear',
        metadata={'help': f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}'},
    )
| 277 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Any = Path(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = Path(_SCREAMING_SNAKE_CASE )
dest_dir.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
for path in src_dir.iterdir():
lowerCAmelCase__ :Tuple = [x.rstrip() for x in list(path.open().readlines() )][:n]
lowerCAmelCase__ :Optional[Any] = dest_dir.joinpath(path.name )
print(_SCREAMING_SNAKE_CASE )
dest_path.open('w' ).write('\n'.join(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
fire.Fire(minify)
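# Example invocation (fire exposes the function as a CLI; paths and the script
# name are placeholders):
#   python minify.py ./wmt_en_ro ./wmt_en_ro_mini 128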
| 93 |
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
| 385 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = tempfile.mkdtemp()
# fmt: off
__UpperCAmelCase : Optional[int] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__UpperCAmelCase : List[str] = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__UpperCAmelCase : Any = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__UpperCAmelCase : Dict = {'''unk_token''': '''<unk>'''}
__UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
__UpperCAmelCase : List[str] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__UpperCAmelCase : Any = os.path.join(self.tmpdirname , __lowercase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowercase , __lowercase )
def A_ ( self : Optional[Any] , **__lowercase : Tuple ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def A_ ( self : Any , **__lowercase : Optional[Any] ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase )
def A_ ( self : Dict , **__lowercase : str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
def A_ ( self : List[str] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__UpperCAmelCase : int = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.get_tokenizer()
__UpperCAmelCase : Dict = self.get_rust_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_image_processor()
__UpperCAmelCase : Any = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCAmelCase : List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase )
__UpperCAmelCase : Tuple = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCAmelCase : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowercase )
self.assertIsInstance(processor_fast.tokenizer , __lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowercase )
self.assertIsInstance(processor_fast.image_processor , __lowercase )
def A_ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Any = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__UpperCAmelCase : Tuple = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
__UpperCAmelCase : List[str] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def A_ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : int = self.get_image_processor()
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__UpperCAmelCase : List[Any] = self.prepare_image_inputs()
__UpperCAmelCase : Dict = image_processor(__lowercase , return_tensors='''np''' )
__UpperCAmelCase : Optional[int] = processor(images=__lowercase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : Any = self.get_tokenizer()
__UpperCAmelCase : Any = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__UpperCAmelCase : int = '''lower newer'''
__UpperCAmelCase : int = processor(text=__lowercase )
__UpperCAmelCase : Any = tokenizer(__lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : List[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__UpperCAmelCase : str = '''lower newer'''
__UpperCAmelCase : int = self.prepare_image_inputs()
__UpperCAmelCase : str = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.get_image_processor()
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : Any = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs()
__UpperCAmelCase : Optional[Any] = self.prepare_image_inputs()
__UpperCAmelCase : Tuple = processor(images=__lowercase , visual_prompt=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def A_ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.get_image_processor()
__UpperCAmelCase : Any = self.get_tokenizer()
__UpperCAmelCase : Any = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__UpperCAmelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : int = processor.batch_decode(__lowercase )
__UpperCAmelCase : int = tokenizer.batch_decode(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )
| 374 |
"""simple docstring"""
def lowerCamelCase_ ( ) ->str:
"""simple docstring"""
for n in range(1 , 1_00_00_00 ):
yield n * (n + 1) // 2
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : Any = 2
while i * i <= n:
__UpperCAmelCase : Tuple = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
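# Example: 28 = 2**2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6
# (its divisors are 1, 2, 4, 7, 14, 28). 28 is also the first triangle
# number with more than five divisors.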
def solution():
    """Returns the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_00)


if __name__ == "__main__":
    print(solution())
| 374 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger()
@dataclass
class snake_case :
'''simple docstring'''
UpperCAmelCase : nn.Module
UpperCAmelCase : List[nn.Module] = field(default_factory=lowerCAmelCase__ )
UpperCAmelCase : list = field(default_factory=lowerCAmelCase__ )
def _lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tensor , lowerCAmelCase_ : Tensor ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase_ , nn.Convad ) or isinstance(lowerCAmelCase_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCAmelCase_ )
def __call__( self : Union[str, Any] , lowerCAmelCase_ : Tensor ) -> Any:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase_ )
[x.remove() for x in self.handles]
return self
@property
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return list(filter(lambda lowerCAmelCase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class snake_case :
'''simple docstring'''
UpperCAmelCase : nn.Module
UpperCAmelCase : nn.Module
UpperCAmelCase : int = 0
UpperCAmelCase : List = field(default_factory=lowerCAmelCase__ )
UpperCAmelCase : List = field(default_factory=lowerCAmelCase__ )
def __call__( self : int , lowerCAmelCase_ : Tensor ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Tracker(self.dest )(lowerCAmelCase_ ).parametrized
SCREAMING_SNAKE_CASE_ = Tracker(self.src )(lowerCAmelCase_ ).parametrized
SCREAMING_SNAKE_CASE_ = list(filter(lambda lowerCAmelCase_ : type(lowerCAmelCase_ ) not in self.src_skip , lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda lowerCAmelCase_ : type(lowerCAmelCase_ ) not in self.dest_skip , lowerCAmelCase_ ) )
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(lowerCAmelCase_ )} operations while'''
F''' destination module has {len(lowerCAmelCase_ )}.''' )
for dest_m, src_m in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase = True )-> Any:
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = timm.create_model(UpperCAmelCase ,pretrained=UpperCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = ResNetForImageClassification(UpperCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = ModuleTransfer(src=UpperCAmelCase ,dest=UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.randn((1, 3, 224, 224) )
module_transfer(UpperCAmelCase )
assert torch.allclose(from_model(UpperCAmelCase ) ,our_model(UpperCAmelCase ).logits ), "The model logits don't match the original one."
SCREAMING_SNAKE_CASE_ = f'''resnet{'-'.join(name.split('resnet' ) )}'''
print(UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message='''Add model''' ,use_temp_dir=UpperCAmelCase ,)
# we can use the convnext one
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message='''Add image processor''' ,use_temp_dir=UpperCAmelCase ,)
print(f'''Pushed {checkpoint_name}''' )
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase = None ,UpperCAmelCase = True )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = 1000
SCREAMING_SNAKE_CASE_ = (1, num_labels)
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(UpperCAmelCase ,UpperCAmelCase ,repo_type='''dataset''' ) ,'''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = partial(UpperCAmelCase ,num_labels=UpperCAmelCase ,idalabel=UpperCAmelCase ,labelaid=UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(UpperCAmelCase ,names_to_config[model_name] ,UpperCAmelCase ,UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
A_ = parser.parse_args()
A_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 393 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class snake_case ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def _lowercase ( self : int , lowerCAmelCase_ : str=0 ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE_ = np.random.RandomState(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**lowerCAmelCase_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _lowercase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
SCREAMING_SNAKE_CASE_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**lowerCAmelCase_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowercase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
SCREAMING_SNAKE_CASE_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# warmup pass to apply optimizations
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs() )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**lowerCAmelCase_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowercase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
SCREAMING_SNAKE_CASE_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**lowerCAmelCase_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowercase ( self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
SCREAMING_SNAKE_CASE_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**lowerCAmelCase_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowercase ( self : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
SCREAMING_SNAKE_CASE_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**lowerCAmelCase_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowercase ( self : Any ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ort.SessionOptions()
SCREAMING_SNAKE_CASE_ = False
return options
def _lowercase ( self : Optional[int] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
SCREAMING_SNAKE_CASE_ = init_image.resize((768, 512) )
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowercase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
SCREAMING_SNAKE_CASE_ = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE_ = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCAmelCase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 393 | 1 |
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 367 |
from math import pow, sqrt


def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    # Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
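# Worked example (illustrative): hydrogen (M = 2.016 g/mol) vs. oxygen (M = 31.998 g/mol).
# Graham's law gives rate_H2 / rate_O2 = sqrt(M_O2 / M_H2), so
#   effusion_ratio(2.016, 31.998)   # ~3.984
# i.e. hydrogen effuses roughly four times faster than oxygen.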
| 367 | 1 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase__ = TypeVar('KEY')
UpperCamelCase__ = TypeVar('VAL')
@dataclass(frozen=_UpperCAmelCase , slots=_UpperCAmelCase )
class a ( Generic[KEY, VAL] ):
UpperCamelCase : KEY
UpperCamelCase : VAL
class a ( _Item ):
def __init__( self ):
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __bool__( self ):
return False
UpperCamelCase__ = _DeletedItem()
class a ( MutableMapping[KEY, VAL] ):
def __init__( self , UpperCamelCase_ = 8 , UpperCamelCase_ = 0.75 ):
UpperCAmelCase__ : Optional[Any] = initial_block_size
UpperCAmelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
UpperCAmelCase__ : List[Any] = capacity_factor
UpperCAmelCase__ : Union[str, Any] = 0
def __snake_case ( self , UpperCamelCase_ ):
return hash(_UpperCAmelCase ) % len(self._buckets )
def __snake_case ( self , UpperCamelCase_ ):
return (ind + 1) % len(self._buckets )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : Optional[int] = self._buckets[ind]
if not stored:
UpperCAmelCase__ : int = _Item(_UpperCAmelCase , _UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
UpperCAmelCase__ : List[str] = _Item(_UpperCAmelCase , _UpperCAmelCase )
return True
else:
return False
def __snake_case ( self ):
UpperCAmelCase__ : Union[str, Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(_UpperCAmelCase )
def __snake_case ( self ):
if len(self._buckets ) <= self._initial_block_size:
return False
UpperCAmelCase__ : Any = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = self._buckets
UpperCAmelCase__ : Optional[int] = [None] * new_size
UpperCAmelCase__ : Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __snake_case ( self ):
self._resize(len(self._buckets ) * 2 )
def __snake_case ( self ):
self._resize(len(self._buckets ) // 2 )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : int = self._get_bucket_index(_UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
UpperCAmelCase__ : Union[str, Any] = self._get_next_ind(_UpperCAmelCase )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
for ind in self._iterate_buckets(_UpperCAmelCase ):
if self._try_set(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
break
def __setitem__( self , UpperCamelCase_ , UpperCamelCase_ ):
if self._is_full():
self._size_up()
self._add_item(_UpperCAmelCase , _UpperCAmelCase )
def __delitem__( self , UpperCamelCase_ ):
for ind in self._iterate_buckets(_UpperCAmelCase ):
UpperCAmelCase__ : Optional[Any] = self._buckets[ind]
if item is None:
raise KeyError(_UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
UpperCAmelCase__ : Tuple = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , UpperCamelCase_ ):
for ind in self._iterate_buckets(_UpperCAmelCase ):
UpperCAmelCase__ : str = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(_UpperCAmelCase )
def __len__( self ):
return self._len
def __iter__( self ):
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
UpperCAmelCase__ : List[Any] = ''' ,'''.join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
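# Quick usage check (illustrative):
#   hm = HashMap(initial_block_size=4)
#   hm["a"] = 1
#   hm["b"] = 2
#   assert hm["a"] == 1 and len(hm) == 2
#   del hm["a"]
#   assert "a" not in list(hm)   # iteration yields the remaining keys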
| 110 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
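# Minimal instantiation sketch (illustrative, assumes `transformers` is installed):
#   config = CanineConfig(num_hash_buckets=8192)
#   assert config.downsampling_rate == 4   # default from the signature above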
| 586 | 0 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
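# Example (illustrative): points on the line x = y = z give a zero AB x AC cross
# product, so they are collinear:
#   are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))   # True
#   are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3))   # False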
| 719 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
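# Minimal end-to-end sketch (illustrative): extract AST features from 1 s of silence.
# The extractor pads/truncates to 1024 frames of 128 mel bins.
#   import numpy as np
#   from transformers import ASTFeatureExtractor
#   fe = ASTFeatureExtractor()
#   feats = fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
#   print(feats.input_values.shape)   # (1, 1024, 128)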
| 64 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 23 |
"""simple docstring"""
from typing import Any
class a :
def __init__( self , UpperCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = data
UpperCAmelCase__ : List[str] = None
def __repr__( self ):
return F'''Node({self.data})'''
class a :
def __init__( self ):
UpperCAmelCase__ : Any = None
def __iter__( self ):
UpperCAmelCase__ : List[str] = self.head
while node:
yield node.data
UpperCAmelCase__ : Optional[int] = node.next
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join([str(UpperCamelCase_ ) for item in self] )
def __getitem__( self , UpperCamelCase_ ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , UpperCamelCase_ , UpperCamelCase_ ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
UpperCAmelCase__ : List[str] = self.head
for _ in range(UpperCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = current.next
UpperCAmelCase__ : List[str] = data
def __snake_case ( self , UpperCamelCase_ ):
self.insert_nth(len(self ) , UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ ):
self.insert_nth(0 , UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
UpperCAmelCase__ : str = Node(UpperCamelCase_ )
if self.head is None:
UpperCAmelCase__ : Tuple = new_node
elif index == 0:
UpperCAmelCase__ : Optional[int] = self.head # link new_node to head
UpperCAmelCase__ : Any = new_node
else:
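            # Walk to the node just before the insertion point, then splice the new node in.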
UpperCAmelCase__ : Dict = self.head
for _ in range(index - 1 ):
UpperCAmelCase__ : Tuple = temp.next
UpperCAmelCase__ : int = temp.next
UpperCAmelCase__ : Tuple = new_node
def __snake_case ( self ): # print every node data
print(self )
def __snake_case ( self ):
return self.delete_nth(0 )
def __snake_case ( self ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def __snake_case ( self , UpperCamelCase_ = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
UpperCAmelCase__ : Union[str, Any] = self.head # default first node
if index == 0:
UpperCAmelCase__ : Dict = self.head.next
else:
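            # Walk to the node just before the one being removed, then unlink it.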
UpperCAmelCase__ : List[Any] = self.head
for _ in range(index - 1 ):
UpperCAmelCase__ : Any = temp.next
UpperCAmelCase__ : Dict = temp.next
UpperCAmelCase__ : Tuple = temp.next.next
return delete_node.data
def __snake_case ( self ):
return self.head is None
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : str = self.head
while current:
# Store the current node's next node.
UpperCAmelCase__ : Any = current.next
# Make the current node's next point backwards
UpperCAmelCase__ : Optional[int] = prev
# Make the previous node be the current node
UpperCAmelCase__ : List[Any] = current
# Make the current node the next node (to progress iteration)
UpperCAmelCase__ : int = next_node
# Return prev in order to put the head at the end
UpperCAmelCase__ : str = prev
def lowerCamelCase ( ):
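    # Exercise every public LinkedList operation end to end on integer data.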
UpperCAmelCase__ : Dict = LinkedList()
assert linked_list.is_empty() is True
assert str(_snake_case ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_snake_case ) == i
linked_list.insert_nth(_snake_case ,i + 1 )
assert str(_snake_case ) == "->".join(str(_snake_case ) for i in range(1 ,11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_snake_case ) == "->".join(str(_snake_case ) for i in range(0 ,12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_snake_case ) == 9
assert str(_snake_case ) == "->".join(str(_snake_case ) for i in range(1 ,10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
for i in range(0 ,9 ):
UpperCAmelCase__ : Any = -i
assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
linked_list.reverse()
assert str(_snake_case ) == "->".join(str(_snake_case ) for i in range(-8 ,1 ) )
def lowerCamelCase ( ):
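    # Check that the list handles heterogeneous data, including Node instances and None.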
UpperCAmelCase__ : int = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.55555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
UpperCAmelCase__ : Dict = LinkedList()
for i in test_input:
linked_list.insert_tail(_snake_case )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
UpperCAmelCase__ : Any = linked_list.delete_head()
assert result == -9
assert (
str(_snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
UpperCAmelCase__ : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(_snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
UpperCAmelCase__ : Dict = linked_list.delete_nth(10 )
assert result is None
assert (
str(_snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_snake_case )
assert (
str(_snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_snake_case )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowerCamelCase ( ):
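    # Interactive demo: build a list from user input, then mutate, reverse, and print it.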
from doctest import testmod
testmod()
UpperCAmelCase__ : Optional[Any] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_snake_case )
print('\nReading/changing Node data using indexing:' )
print(F'''Element at Position 1: {linked_list[1]}''' )
UpperCAmelCase__ : List[Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(_snake_case )
print(F'''length of linked_list is : {len(_snake_case )}''' )
if __name__ == "__main__":
main()
| 110 | 0 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , __lowerCamelCase , __lowerCamelCase=2 , __lowerCamelCase=56 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=99 , __lowerCamelCase=32 , __lowerCamelCase=2 , __lowerCamelCase=2 , __lowerCamelCase=7 , __lowerCamelCase="gelu_new" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_12 , __lowerCamelCase=16 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=4 , __lowerCamelCase="block_sparse" , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=2 , __lowerCamelCase=3 , ) -> int:
'''simple docstring'''
snake_case: str = parent
snake_case: Any = batch_size
snake_case: List[str] = seq_length
snake_case: str = is_training
snake_case: List[str] = use_attention_mask
snake_case: Optional[Any] = use_token_type_ids
snake_case: Union[str, Any] = use_labels
snake_case: Dict = vocab_size
snake_case: Dict = hidden_size
snake_case: Optional[Any] = num_hidden_layers
snake_case: int = num_attention_heads
snake_case: List[Any] = intermediate_size
snake_case: Optional[Any] = hidden_act
snake_case: List[str] = hidden_dropout_prob
snake_case: Dict = attention_probs_dropout_prob
snake_case: Optional[Any] = max_position_embeddings
snake_case: str = type_vocab_size
snake_case: Dict = type_sequence_label_size
snake_case: List[Any] = initializer_range
snake_case: str = num_choices
snake_case: Any = rescale_embeddings
snake_case: int = attention_type
snake_case: int = use_bias
snake_case: List[str] = block_size
snake_case: Any = num_random_blocks
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
snake_case: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case: Tuple = None
if self.use_attention_mask:
snake_case: Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case: Tuple = None
if self.use_token_type_ids:
snake_case: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case: Any = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
snake_case: Union[str, Any] = self.prepare_config_and_inputs()
snake_case: List[Any] = config_and_inputs
snake_case: Any = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase ( __snake_case , unittest.TestCase ):
__lowerCamelCase = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
__lowerCamelCase = False
__lowerCamelCase = False
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
snake_case: List[Any] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_hidden_states_output()
@slow
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case: Optional[Any] = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(__lowerCamelCase )
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case: Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
snake_case: Tuple = model_class(__lowerCamelCase )
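                # JIT-compile the forward pass so its outputs can be compared against eager execution below.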
@jax.jit
def model_jitted(__lowerCamelCase , __lowerCamelCase=None , **__lowerCamelCase ):
return model(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , **__lowerCamelCase )
with self.subTest("""JIT Enabled""" ):
snake_case: Union[str, Any] = model_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
snake_case: List[str] = model_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1e-5 , __lowerCamelCase="outputs" , __lowerCamelCase=None ) -> List[str]:
'''simple docstring'''
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
| 706 | import re
def a_ (_lowerCAmelCase : str )-> list:
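    # Break the string into chunks at punctuation, then split each chunk into words.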
return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" , str_ )]
def a_ (_lowerCAmelCase : str )-> str:
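    # Capitalize every word and join with no separator (simple/Pascal-style case).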
snake_case: Tuple = split_input(str_ )
return "".join(
["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def a_ (_lowerCAmelCase : str , _lowerCAmelCase : bool , _lowerCAmelCase : str )-> str:
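    # Join the words with the given separator, upper- or lower-casing them according to the flag.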
try:
snake_case: int = split_input(_lowerCAmelCase )
if upper:
snake_case: List[Any] = """""".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
snake_case: Dict = """""".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def a_ (_lowerCAmelCase : str )-> str:
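    # Same as the simple case: capitalized words with no separator.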
return to_simple_case(_lowerCAmelCase )
def a_ (_lowerCAmelCase : str )-> str:
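    # Camel case: the simple case with the first character lower-cased.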
try:
snake_case: Tuple = to_simple_case(_lowerCAmelCase )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def a_ (_lowerCAmelCase : str , _lowerCAmelCase : bool )-> str:
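    # Snake case: words joined with underscores.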
return to_complex_case(_lowerCAmelCase , _lowerCAmelCase , """_""" )
def a_ (_lowerCAmelCase : str , _lowerCAmelCase : bool )-> str:
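    # Kebab case: words joined with hyphens.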
return to_complex_case(_lowerCAmelCase , _lowerCAmelCase , """-""" )
if __name__ == "__main__":
__import__('doctest').testmod()
| 164 | 0 |