| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 87 to 55.2k) | int64 (0 to 349) | string (lengths 135 to 49.1k) | int64 (0 to 349) | int64 (0 to 1) |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires a `forward`; only the weights are used here
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
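# A hypothetical invocation sketch; the script name and both paths below are
# placeholders, not files shipped with this converter:
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./longformer_qa.ckpt \
#       --pytorch_dump_folder_path ./converted_longformer_qa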
| 345 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MobileBertForMaskedLM,
        MobileBertForMultipleChoice,
        MobileBertForNextSentencePrediction,
        MobileBertForPreTraining,
        MobileBertForQuestionAnswering,
        MobileBertForSequenceClassification,
        MobileBertForTokenClassification,
        MobileBertModel,
    )
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
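# The ratio-based bounds check above generalizes to a small helper. A minimal
# sketch (the `relative_close` name is ours, not part of the test suite; it
# relies on the `torch` import guarded by `is_torch_available()` above):
def relative_close(expected, actual, tolerance=1e-3):
    # Compare by ratio so values spanning many orders of magnitude share one tolerance.
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))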
| 345 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches a prompt from a repo, or returns the prompt itself when one is passed directly."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
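# Hedged usage sketch ("my-agent" is an arbitrary label, and the second call
# assumes network access to the default prompts dataset):
#
#   download_prompt("Summarize <<task>>", agent_name="my-agent")   # contains whitespace -> returned verbatim
#   download_prompt(None, agent_name="my-agent", mode="run")       # fetched from huggingface-tools/default-prompts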
| 345 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
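# Minimal usage sketch outside the test harness (assumes network access to the
# same checkpoint the tests use):
#
#   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#   inputs = processor(text="This is a test string", voice_preset="en_speaker_1")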
| 345 | 1 |
# Imports
import numpy as np
class IndexCalculation:
    """
    Compute vegetation indices from red, green, blue, red-edge and NIR band
    matrices (numpy arrays). Only the bands required by the chosen index need
    to be provided.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False
    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
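# Minimal usage sketch with tiny synthetic bands (the reflectance values are arbitrary):
if __name__ == "__main__":
    bands = IndexCalculation(
        red=np.array([[50.0]]),
        green=np.array([[60.0]]),
        blue=np.array([[45.0]]),
        red_edge=np.array([[70.0]]),
        nir=np.array([[80.0]]),
    )
    print(bands.calculation("NDVI"))  # (nir - red) / (nir + red) -> [[0.23076923]]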
| 345 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
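# Minimal TextIteratorStreamer sketch outside unittest (assumes network access to
# the tiny test checkpoint used above):
#
#   tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#   mdl = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#   streamer = TextIteratorStreamer(tok)
#   enc = tok("Hello", return_tensors="pt")
#   Thread(target=mdl.generate, kwargs={**enc, "max_new_tokens": 5, "streamer": streamer}).start()
#   print("".join(piece for piece in streamer))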
| 345 | 1 |
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Check whether `next_ver` can extend the partial Hamiltonian path."""
    # 1. Validate that path and next vertex are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursively try to complete the Hamiltonian cycle from position `curr_ind`."""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle of `graph` starting at `start_index`, or [] if none exists."""
    # initialize path with -1, indicating that we have not visited those vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
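# A small smoke test: a 4-cycle (square) has the Hamiltonian cycle 0-1-2-3-0.
if __name__ == "__main__":
    adjacency = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(hamilton_cycle(adjacency))  # [0, 1, 2, 3, 0]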
| 345 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
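# Minimal generation sketch outside the test harness (assumes network access to
# the CompVis/ldm-celebahq-256 checkpoint; a GPU is recommended):
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256").to(torch_device)
#   image = pipe(generator=torch.manual_seed(0), num_inference_steps=5, output_type="numpy").images[0]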
| 345 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
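# Observable effect of the lazy module (a sketch; assumes torch is installed):
# importing the package stays cheap, and the torch-backed classes listed above
# are only materialized on first attribute access, e.g.
#
#   from transformers import PegasusXConfig   # lightweight, always available
#   from transformers import PegasusXModel    # first access triggers the deferred import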
| 345 |
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of `graph`, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
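# A cyclic input exercises the "Cycle exists" branch (sketch):
#
#   topological_sort({0: [1], 1: [2], 2: [0]})  # prints: Cycle exists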
| 345 | 1 |
| 345 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
        '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class _snake_case ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self ,image_size=224 ,patch_size=4 ,num_channels=3 ,embed_dim=96 ,depths=[2, 2, 6, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,mlp_ratio=4.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="""gelu""" ,use_absolute_embeddings=False ,initializer_range=0.0_2 ,layer_norm_eps=1e-5 ,encoder_stride=32 ,**kwargs ,) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
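# Worked example of the hidden_size formula above (illustrative, not part of
# the config class): with the defaults embed_dim=96 and depths=[2, 2, 6, 2]
# there are four stages, and the channel count doubles at each stage
# transition, so the final stage width is 96 * 2 ** (4 - 1) = 768.
if __name__ == "__main__":
    embed_dim, depths = 96, [2, 2, 6, 2]
    print(int(embed_dim * 2 ** (len(depths ) - 1) ) )  # 768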
| 345 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
    '''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
UpperCamelCase_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
UpperCamelCase_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class _snake_case ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self ,vocab_size=51865 ,num_mel_bins=80 ,encoder_layers=6 ,encoder_attention_heads=4 ,decoder_layers=6 ,decoder_attention_heads=4 ,decoder_ffn_dim=1536 ,encoder_ffn_dim=1536 ,encoder_layerdrop=0.0 ,decoder_layerdrop=0.0 ,decoder_start_token_id=50257 ,use_cache=True ,is_encoder_decoder=True ,activation_function="""gelu""" ,d_model=256 ,dropout=0.0 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.0_2 ,scale_embedding=False ,max_source_positions=1500 ,max_target_positions=448 ,pad_token_id=50256 ,bos_token_id=50256 ,eos_token_id=50256 ,suppress_tokens=None ,begin_suppress_tokens=[220, 50256] ,use_weighted_layer_sum=False ,classifier_proj_size=256 ,apply_spec_augment=False ,mask_time_prob=0.0_5 ,mask_time_length=10 ,mask_time_min_masks=2 ,mask_feature_prob=0.0 ,mask_feature_length=10 ,mask_feature_min_masks=0 ,median_filter_width=7 ,**kwargs ,) -> None:
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,suppress_tokens=suppress_tokens ,begin_suppress_tokens=begin_suppress_tokens ,**kwargs ,)
class _snake_case ( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
    def A__ ( self: str ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
            ] )
        if self.use_past:
            common_inputs["""decoder_input_ids"""] = {0: """batch"""}
        else:
            common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction="""inputs""" )
        return common_inputs
    def A__ ( self: List[str] ,preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework: Optional["TensorType"] = None ,sampling_rate: int = 22050 ,time_duration: float = 5.0 ,frequency: int = 220 ,) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self ,preprocessor=preprocessor.feature_extractor ,batch_size=batch_size ,framework=framework ,sampling_rate=sampling_rate ,time_duration=time_duration ,frequency=frequency ,)
        encoder_sequence_length = encoder_inputs["""input_features"""].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        dummy_inputs["""input_features"""] = encoder_inputs.pop("""input_features""" )
        dummy_inputs["""decoder_input_ids"""] = decoder_inputs.pop("""decoder_input_ids""" )
        if "past_key_values" in decoder_inputs:
            dummy_inputs["""past_key_values"""] = decoder_inputs.pop("""past_key_values""" )
        return dummy_inputs
@property
def A__ ( self: int ) -> float:
return 1e-3
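# A minimal standalone sketch (toy model, not Whisper) of how an axes mapping
# like the one returned by the `inputs` property feeds torch.onnx.export's
# dynamic_axes argument; the real export pipeline is more involved.
if __name__ == "__main__":
    import torch

    class _Toy(torch.nn.Module):
        def forward(self ,input_features ):
            return input_features.mean(dim=-1 )

    dynamic_axes = {"""input_features""": {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}}
    torch.onnx.export(
        _Toy() ,(torch.randn(1, 80, 3000 ),) ,"""toy.onnx""" ,input_names=["""input_features"""] ,output_names=["""out"""] ,dynamic_axes=dynamic_axes ,)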
| 345 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> str:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" ,return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self: str ) -> int:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("""gpt2""" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" ,return_value=response_mock ) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# This check we did call the fake head request
mock_head.assert_called()
def A__ ( self: str ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file ,"""wb""" ) as f:
                http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
            with open("""tokenizer.json""" ,"""wb""" ) as f:
                http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,f )
            tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def A__ ( self: List[str] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls: Dict ) -> Optional[int]:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def A__ ( cls: Optional[Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def A__ ( self: Any ) -> Optional[int]:
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"""vocab.txt""" )
            with open(vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir ,repo_id="""test-tokenizer""" ,push_to_hub=True ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def A__ ( self: Optional[int] ) -> Any:
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"""vocab.txt""" )
            with open(vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=True ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def A__ ( self: Optional[int] ) -> Optional[Any]:
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"""vocab.txt""" )
            with open(vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
        # No fast custom tokenizer
        tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"""vocab.txt""" )
            with open(vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
        tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
        tokenizer = AutoTokenizer.from_pretrained(
            F'''{USER}/test-dynamic-tokenizer''' ,use_fast=False ,trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[Any] ) -> Any:
        trie = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def A__ ( self: Tuple ) -> Optional[int]:
        trie = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )
def A__ ( self: Optional[Any] ) -> Optional[int]:
        trie = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )
def A__ ( self: Union[str, Any] ) -> int:
        trie = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> Union[str, Any]:
        trie = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> List[str]:
        trie = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )
def A__ ( self: str ) -> Optional[int]:
        trie = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )
def A__ ( self: List[Any] ) -> Any:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
        trie = Trie()
        parts = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts ,["""AB""", """C"""] )
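# A minimal sketch of the kind of trie the tests above exercise -- greedy
# longest-match splitting on added tokens. This is an illustration only, not
# transformers' actual Trie implementation.
class MiniTrie:
    def __init__(self ):
        self.data = {}
    def add(self ,word ):
        node = self.data
        for ch in word:
            node = node.setdefault(ch ,{} )
        node[""""""] = 1  # end-of-token marker, mirroring the structure asserted above
    def split(self ,text ):
        parts, start, i = [], 0, 0
        while i < len(text ):
            node, j, end = self.data, i, None
            while j < len(text ) and text[j] in node:
                node = node[text[j]]
                j += 1
                if """""" in node:
                    end = j  # remember the longest complete token seen so far
            if end is None:
                i += 1
            else:
                if i > start:
                    parts.append(text[start:i] )
                parts.append(text[i:end] )
                start = i = end
        if start < len(text ):
            parts.append(text[start:] )
        return parts
if __name__ == "__main__":
    t = MiniTrie()
    t.add("""[CLS]""" )
    t.add("""extra_id_1""" )
    t.add("""extra_id_100""" )
    print(t.split("""[CLS] This is a extra_id_100""" ) )  # ['[CLS]', ' This is a ', 'extra_id_100']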
| 345 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> Any:
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        image_processor_map = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
"""do_convert_rgb""": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname ,FEATURE_EXTRACTOR_NAME )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map ,fp )
def A__ ( self: Tuple ,**lowerCamelCase_: Any ) -> Dict:
return BertTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: Dict ,**lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return BertTokenizerFast.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: Tuple ,**lowerCamelCase_: Optional[Any] ) -> Dict:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def A__ ( self: Optional[int] ) -> str:
        image_inputs = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def A__ ( self: List[Any] ) -> Dict:
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow ,image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast ,image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer ,BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer ,BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor ,ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor ,ChineseCLIPImageProcessor )
def A__ ( self: Union[str, Any] ) -> List[str]:
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(cls_token="""(CLS)""" ,sep_token="""(SEP)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname ,cls_token="""(CLS)""" ,sep_token="""(SEP)""" ,do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,ChineseCLIPImageProcessor )
def A__ ( self: Optional[int] ) -> Optional[Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input ,return_tensors="""np""" )
        input_processor = processor(images=image_input ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def A__ ( self: List[Any] ) -> List[Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def A__ ( self: Union[str, Any] ) -> str:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str ,images=image_input )
        self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
def A__ ( self: str ) -> Optional[Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok ,decoded_processor )
def A__ ( self: str ) -> Tuple:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str ,images=image_input )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
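# A minimal sketch (illustration only, not the real ChineseCLIPProcessor) of
# what a processor does: route text to the tokenizer, images to the image
# processor, and merge the two output dicts -- which is why the combined call
# above exposes input_ids/token_type_ids/attention_mask next to pixel_values.
class MiniProcessor:
    def __init__(self ,tokenizer ,image_processor ):
        self.tokenizer = tokenizer
        self.image_processor = image_processor
    def __call__(self ,text=None ,images=None ,**kwargs ):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images.""" )
        out = {}
        if text is not None:
            out.update(self.tokenizer(text ,**kwargs ) )
        if images is not None:
            out.update(self.image_processor(images ,**kwargs ) )
        return out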
| 345 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
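# A minimal sketch of the dummy-object pattern above: a simplified
# requires_backends (not the real transformers implementation) raises only
# when a class is instantiated or one of its constructors is called, so the
# module imports cleanly even when flax is missing and the error surfaces at
# first use.
def _requires_backends_sketch(obj ,backends ):
    name = obj.__name__ if isinstance(obj ,type ) else obj.__class__.__name__
    backends_str = """, """.join(backends )
    raise ImportError(F'''{name} requires the following backends: {backends_str}''' )
class _FlaxThingSketch:
    def __init__(self ,*args ,**kwargs ):
        _requires_backends_sketch(self ,["""flax"""] )
if __name__ == "__main__":
    try:
        _FlaxThingSketch()
    except ImportError as err:
        print(err )  # _FlaxThingSketch requires the following backends: flax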
| 345 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_sw3'''] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
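# A sketch of the lazy-import idea (PEP 562 module-level __getattr__), shown
# standalone for illustration -- the real _LazyModule above is more involved
# and replaces the module object in sys.modules itself.
import importlib
_LAZY_ATTRS = {'''GPTSw3Tokenizer''': '''tokenization_gpt_sw3'''}
def __getattr__(name ):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(F'''.{_LAZY_ATTRS[name]}''' ,__name__ )
        return getattr(submodule ,name )
    raise AttributeError(name )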
| 345 |
import random
from typing import Any
def fisher_yates_shuffle ( _a : list ):
    '''
    Shuffle the list in place with the Fisher-Yates algorithm: walk the list
    backwards and swap each element with a uniformly chosen element at or
    before it, which makes every permutation equally likely.
    '''
    for i in range(len(_a ) - 1 ,0 ,-1 ):
        j = random.randint(0 ,i )
        _a[i], _a[j] = _a[j], _a[i]
    return _a
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
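    # Quick empirical check (illustrative addition, not from the original
    # script): each of the 3! = 6 permutations of [0, 1, 2] should come out
    # with frequency close to 1/6 if the shuffle above is unbiased.
    from collections import Counter
    counts = Counter(tuple(fisher_yates_shuffle(list(range(3 ) ) ) ) for _ in range(60000 ) )
    print({p: round(c / 60000 ,3 ) for p, c in counts.items()} )  # each ~0.167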
| 345 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class _snake_case ( SegformerImageProcessor ):
'''simple docstring'''
def __init__( self: Optional[Any] ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: int ) -> None:
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" ,lowerCamelCase_ ,)
super().__init__(*lowerCamelCase_ ,**lowerCamelCase_ )
| 345 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
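# A tiny jax.numpy illustration (standalone sketch, not part of the blocks
# above) of the up-block skip connection: the stored down-block output is
# popped off the tuple and concatenated with the current activations on the
# channel axis (channels-last in Flax), doubling the channel count that the
# following resnet consumes.
if __name__ == "__main__":
    hidden_states = jnp.ones((1, 16, 16, 320) )  # (batch, height, width, channels)
    res_hidden_states_tuple = (jnp.ones((1, 16, 16, 320) ),)
    res_hidden_states = res_hidden_states_tuple[-1]
    merged = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
    print(merged.shape )  # (1, 16, 16, 640)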
| 345 | 1 |
def neville_interpolate ( x_points : list , y_points : list , xa : float ):
    '''
    Evaluate, at the point xa, the polynomial interpolating the given points
    using Neville's iterated-interpolation scheme; returns the interpolated
    value and the full table of intermediate interpolants.
    '''
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
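    # Illustrative usage (added, not from the original module): interpolating
    # y = x**2 through four points and evaluating at x = 2.5 gives 6.25,
    # since the data lie on a polynomial of degree <= 3.
    value, _table = neville_interpolate([1, 2, 3, 4] ,[1, 4, 9, 16] ,2.5 )
    print(value )  # 6.25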
| 345 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]:
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : Optional[int] = conva_get[:2]
UpperCAmelCase_ : List[Any] = conva_get[2]
UpperCAmelCase_ : str = size_pa
UpperCAmelCase_ : Optional[int] = rate_w
UpperCAmelCase_ : Dict = rate_t
UpperCAmelCase_ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# modify model parameter
UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : int = model_dic.get("""wkj""" )
UpperCAmelCase_ : int = model_dic.get("""vji""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple:
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any:
# convolution process
UpperCAmelCase_ : Optional[Any] = convs[0]
UpperCAmelCase_ : int = convs[1]
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Dict = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
# calculate the feature map of every single kernel, and saved as list of matrix
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
        # expanding the data slice to one dimension
UpperCAmelCase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
UpperCAmelCase_ : Any = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[int] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
        # expanding three-dimensional data to a one-dimensional list
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Tuple = np.shape(data[i] )
UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
# expanding matrix to one dimension list
UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = pd_pool[
i_pool
]
UpperCAmelCase_ : List[str] = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]:
        # model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = data_bp_input
UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[str] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[Any] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : int = rp + 1
UpperCAmelCase_ : Any = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
# model predict
UpperCAmelCase_ : Union[str, Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : str = data_bp_input
UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
        # return the image data after the convolution and pooling steps so they can be inspected
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
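# The training loop above implements classic sigmoid backpropagation with the
# delta rule. Below is a self-contained sketch of the fully connected part of
# that update; the toy shapes, learning rate, and variable names are
# assumptions for illustration (thresholds are omitted for brevity):
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

rng = np.random.default_rng(0)
x = rng.random((1, 6))                          # expanded pooled features
vji = rng.random((4, 6))                        # hidden-layer weights
wkj = rng.random((2, 4))                        # output-layer weights
teach = np.array([[1.0, 0.0]])                  # one-hot target

hidden = sigmoid(x @ vji.T)                     # forward pass
out = sigmoid(hidden @ wkj.T)
pd_k = (teach - out) * out * (1 - out)          # output-layer gradient
pd_j = (pd_k @ wkj) * hidden * (1 - hidden)     # hidden-layer gradient
wkj += 0.2 * pd_k.T @ hidden                    # delta-rule weight updates
vji += 0.2 * pd_j.T @ x
print(float(np.sum(np.abs(teach - out))))       # per-image error, as in the loop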
| 345 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : int = MobileBertTokenizer
A__ : List[Any] = MobileBertTokenizerFast
A__ : Optional[Any] = True
A__ : List[str] = True
A__ : Tuple = filter_non_english
A__ : Optional[Any] = "google/mobilebert-uncased"
def A__ ( self: Union[str, Any] ) -> Tuple:
super().setUp()
UpperCAmelCase_ : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCAmelCase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
UpperCAmelCase_ : Optional[int] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A__ ( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : List[str] = """UNwant\u00E9d,running"""
UpperCAmelCase_ : Optional[int] = """unwanted, running"""
return input_text, output_text
def A__ ( self: Optional[int] ) -> Dict:
UpperCAmelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCamelCase_ ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[9, 6, 7, 12, 10, 11] )
def A__ ( self: Tuple ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : str = """UNwant\u00E9d,running"""
UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(lowerCamelCase_ )
UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Any = tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = rust_tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer()
UpperCAmelCase_ : List[str] = tokenizer.encode(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
# With lower casing
UpperCAmelCase_ : Any = self.get_tokenizer(do_lower_case=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.get_rust_tokenizer(do_lower_case=lowerCamelCase_ )
UpperCAmelCase_ : str = """UNwant\u00E9d,running"""
UpperCAmelCase_ : Optional[int] = tokenizer.tokenize(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = rust_tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : str = self.get_rust_tokenizer()
UpperCAmelCase_ : Optional[Any] = tokenizer.encode(lowerCamelCase_ )
UpperCAmelCase_ : str = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Any ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def A__ ( self: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCamelCase_ ,strip_accents=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def A__ ( self: Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : List[Any] = BasicTokenizer(do_lower_case=lowerCamelCase_ ,strip_accents=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def A__ ( self: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : int = BasicTokenizer(do_lower_case=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCamelCase_ ,strip_accents=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def A__ ( self: List[str] ) -> int:
UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCamelCase_ ,strip_accents=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def A__ ( self: Any ) -> Dict:
UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCamelCase_ ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def A__ ( self: int ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
UpperCAmelCase_ : Any = {}
for i, token in enumerate(lowerCamelCase_ ):
UpperCAmelCase_ : int = i
UpperCAmelCase_ : Optional[int] = WordpieceTokenizer(vocab=lowerCamelCase_ ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
def A__ ( self: List[Any] ) -> Any:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def A__ ( self: Any ) -> str:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def A__ ( self: Any ) -> str:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def A__ ( self: Optional[int] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCamelCase_ ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCamelCase_ ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : int = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
UpperCAmelCase_ : Optional[Any] = tokenizer.encode("""sequence builders""" ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ,lowerCamelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def A__ ( self: Any ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Tuple = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
UpperCAmelCase_ : Optional[int] = tokenizer_r.encode_plus(
lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,return_offsets_mapping=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,)
UpperCAmelCase_ : str = tokenizer_r.do_lower_case if hasattr(lowerCamelCase_ ,"""do_lower_case""" ) else False
UpperCAmelCase_ : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
def A__ ( self: Any ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ["""的""", """人""", """有"""]
UpperCAmelCase_ : Tuple = """""".join(lowerCamelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = tokenizer_p.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = tokenizer_r.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(lowerCamelCase_ )
UpperCAmelCase_ : int = tokenizer_p.convert_ids_to_tokens(lowerCamelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Any = tokenizer_r.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = tokenizer_p.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(lowerCamelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase_ : Tuple = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCamelCase_ )
]
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
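# The WordpieceTokenizer assertions above rely on greedy longest-match-first
# subword lookup. A minimal, self-contained sketch of that algorithm follows
# (the toy vocab is taken from the test fixture; the real implementation also
# caps the maximum characters per word):

def greedy_wordpiece(token, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(token):
        end, cur = len(token), None
        while start < end:
            sub = token[start:end]
            if start > 0:
                sub = "##" + sub        # continuation pieces carry the "##" prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk]                # any unmatched span makes the whole token unknown
        pieces.append(cur)
        start = end
    return pieces

toy_vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(greedy_wordpiece("unwanted", toy_vocab))   # ['un', '##want', '##ed']
print(greedy_wordpiece("unwantedX", toy_vocab))  # ['[UNK]']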
| 345 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Optional[Any] = CTRLTokenizer
A__ : Optional[Any] = False
A__ : str = False
def A__ ( self: Optional[int] ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: int ,lowerCamelCase_: int ) -> str:
UpperCAmelCase_ : List[str] = """adapt react readapt apt"""
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
return input_text, output_text
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
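# The expected outputs above follow rank-ordered BPE merging. A minimal sketch
# using the toy merge table from setUp (the real tokenizer additionally caches
# results and emits "@@" continuation markers instead of the "</w>" suffix):

def bpe(word, merge_ranks):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        ranked = [(merge_ranks.get(pair, float("inf")), i)
                  for i, pair in enumerate(zip(symbols, symbols[1:]))]
        rank, i = min(ranked)
        if rank == float("inf"):
            break                       # no applicable merge left
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2:]
    return symbols

merges = ["a p", "ap t</w>", "r e", "a d", "ad apt</w>"]
ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
print(bpe("adapt", ranks))  # ['adapt</w>']
print(bpe("react", ranks))  # ['re', 'a', 'c', 't</w>']  -> "re@@ a@@ c@@ t"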
| 345 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : UNetaDModel
A__ : ScoreSdeVeScheduler
def __init__( self: Any ,lowerCamelCase_: UNetaDModel ,lowerCamelCase_: ScoreSdeVeScheduler ) -> Any:
super().__init__()
self.register_modules(unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self: Optional[int] ,lowerCamelCase_: int = 1 ,lowerCamelCase_: int = 2000 ,lowerCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase_: Optional[str] = "pil" ,lowerCamelCase_: bool = True ,**lowerCamelCase_: List[str] ,) -> Union[ImagePipelineOutput, Tuple]:
UpperCAmelCase_ : Union[str, Any] = self.unet.config.sample_size
UpperCAmelCase_ : Tuple = (batch_size, 3, img_size, img_size)
UpperCAmelCase_ : Tuple = self.unet
UpperCAmelCase_ : Union[str, Any] = randn_tensor(lowerCamelCase_ ,generator=lowerCamelCase_ ) * self.scheduler.init_noise_sigma
UpperCAmelCase_ : Dict = sample.to(self.device )
self.scheduler.set_timesteps(lowerCamelCase_ )
self.scheduler.set_sigmas(lowerCamelCase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCAmelCase_ : Union[str, Any] = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
UpperCAmelCase_ : int = self.unet(lowerCamelCase_ ,lowerCamelCase_ ).sample
UpperCAmelCase_ : List[str] = self.scheduler.step_correct(lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ ).prev_sample
# prediction step
UpperCAmelCase_ : Any = model(lowerCamelCase_ ,lowerCamelCase_ ).sample
UpperCAmelCase_ : Any = self.scheduler.step_pred(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = output.prev_sample, output.prev_sample_mean
UpperCAmelCase_ : Dict = sample_mean.clamp(0 ,1 )
UpperCAmelCase_ : List[Any] = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : Optional[Any] = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCamelCase_ )
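# Hypothetical usage of the pipeline above (it mirrors ScoreSdeVePipeline in
# diffusers; the checkpoint name is an assumption for illustration):
import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
image = pipe(num_inference_steps=2000, generator=torch.manual_seed(0)).images[0]
image.save("sde_ve_sample.png")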
| 345 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = "ernie_m"
A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout
UpperCAmelCase_ : str = is_decoder
UpperCAmelCase_ : List[str] = act_dropout
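# A minimal sketch of instantiating this configuration (its public name in
# transformers is ErnieMConfig; the defaults above mirror ernie-m-base):
from transformers import ErnieMConfig, ErnieMModel

config = ErnieMConfig(num_hidden_layers=6)    # override a default for a smaller model
model = ErnieMModel(config)
print(config.vocab_size, config.hidden_size)  # 250002 768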
| 345 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["image_processor", "tokenizer"]
A__ : Union[str, Any] = "ViTImageProcessor"
A__ : Optional[Any] = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self: List[Any] ,lowerCamelCase_: Dict=None ,lowerCamelCase_: int=None ,**lowerCamelCase_: str ) -> Any:
UpperCAmelCase_ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,lowerCamelCase_ ,)
UpperCAmelCase_ : Optional[int] = kwargs.pop("""feature_extractor""" )
UpperCAmelCase_ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCamelCase_ ,lowerCamelCase_ )
def __call__( self: Union[str, Any] ,lowerCamelCase_: Dict=None ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: Optional[Any]=None ,lowerCamelCase_: List[str]=None ,**lowerCamelCase_: str ) -> str:
if text is None and visual_prompt is None and images is None:
raise ValueError("""You have to specify either text, visual prompt or images.""" )
if text is not None and visual_prompt is not None:
raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""" )
if text is not None:
UpperCAmelCase_ : Optional[Any] = self.tokenizer(lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ )
if visual_prompt is not None:
UpperCAmelCase_ : Tuple = self.image_processor(lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ )
if images is not None:
UpperCAmelCase_ : List[Any] = self.image_processor(lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ )
if visual_prompt is not None and images is not None:
UpperCAmelCase_ : Dict = {
"""pixel_values""": image_features.pixel_values,
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase_ : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase_ : Optional[Any] = {
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ) ,tensor_type=lowerCamelCase_ )
def A__ ( self: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Union[str, Any] ) -> Any:
return self.tokenizer.batch_decode(*lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: Union[str, Any] ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> str:
return self.tokenizer.decode(*lowerCamelCase_ ,**lowerCamelCase_ )
@property
def A__ ( self: Any ) -> Dict:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,lowerCamelCase_ ,)
return self.image_processor_class
@property
def A__ ( self: Tuple ) -> int:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,lowerCamelCase_ ,)
return self.image_processor
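# Hypothetical usage of the processor above (it matches CLIPSegProcessor in
# transformers; the checkpoint name is an assumption for illustration):
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']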
| 345 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowerCamelCase_ ( _a : str , _a : Any=100 , _a : int=" " ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = text.split(_a )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_a ) , _a )]
def lowerCamelCase_ ( _a : dict ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(_a ):
titles.append(title if title is not None else """""" )
texts.append(_a )
return {"title": titles, "text": texts}
def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ):
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCAmelCase_ : Optional[int] = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a )
UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCAmelCase_ : Any = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
UpperCAmelCase_ : List[str] = dataset.map(
partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , )
# And finally save your dataset
UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(_a )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=_a )
# And save the index
UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(_a )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class _snake_case :
'''simple docstring'''
A__ : str = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
A__ : Optional[str] = field(
default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
A__ : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
A__ : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
A__ : Optional[str] = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : Optional[int] = field(
default=__snake_case , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
A__ : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
A__ : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
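# Hypothetical follow-up once the script has run: reload the saved passages and
# Faiss index, then retrieve with a DPR question encoder (the paths and model
# names below are assumptions for illustration):
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

ds = load_from_disk("path/to/my_knowledge_dataset")
ds.load_faiss_index("embeddings", "path/to/my_knowledge_dataset_hnsw_index.faiss")
q_name = "facebook/dpr-question_encoder-single-nq-base"
q_tok = DPRQuestionEncoderTokenizerFast.from_pretrained(q_name)
q_enc = DPRQuestionEncoder.from_pretrained(q_name)
question = "What does Moses' rod turn into ?"
q_emb = q_enc(**q_tok(question, return_tensors="pt")).pooler_output[0].detach().numpy()
scores, retrieved = ds.get_nearest_examples("embeddings", q_emb, k=5)
print(retrieved["title"])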
| 345 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[Any]=13 ,lowerCamelCase_: List[str]=10 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: List[Any]=2 ,lowerCamelCase_: Tuple=2 ,lowerCamelCase_: Optional[Any]=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[Any]=32 ,lowerCamelCase_: str=5 ,lowerCamelCase_: List[Any]=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: int="gelu" ,lowerCamelCase_: Tuple=0.1 ,lowerCamelCase_: List[Any]=0.1 ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: Optional[Any]=0.0_2 ,lowerCamelCase_: Optional[Any]="divided_space_time" ,lowerCamelCase_: Tuple=None ,) -> List[Any]:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : List[Any] = num_frames
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = attention_type
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = scope
UpperCAmelCase_ : Optional[Any] = num_labels
        # in TimeSformer, the sequence length equals num_frames * num_patches per frame + 1 CLS token
UpperCAmelCase_ : Optional[int] = (image_size // patch_size) ** 2
UpperCAmelCase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1
def A__ ( self: str ) -> str:
UpperCAmelCase_ : Any = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Dict = None
if self.use_labels:
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_labels )
UpperCAmelCase_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : List[str] = TimesformerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,)
UpperCAmelCase_ : Dict = self.num_labels
return config
def A__ ( self: List[str] ,lowerCamelCase_: int ,lowerCamelCase_: List[str] ,lowerCamelCase_: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : int = TimesformerModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ) -> Dict:
UpperCAmelCase_ : Optional[Any] = TimesformerForVideoClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
# verify the logits shape
UpperCAmelCase_ : List[str] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape ,lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
A__ : int = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
A__ : Optional[Any] = False
A__ : List[str] = False
A__ : List[Any] = False
A__ : int = False
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Dict = TimesformerModelTester(self )
UpperCAmelCase_ : Tuple = ConfigTester(
self ,config_class=lowerCamelCase_ ,has_text_modality=lowerCamelCase_ ,hidden_size=37 )
def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=False ) -> int:
UpperCAmelCase_ : Dict = copy.deepcopy(lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
return inputs_dict
def A__ ( self: List[str] ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def A__ ( self: List[str] ) -> List[Any]:
pass
def A__ ( self: List[str] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: Any ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Dict:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCamelCase_ )
@slow
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = TimesformerModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Tuple ) -> Optional[int]:
if not self.has_attentions:
pass
else:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = True
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = self.model_tester.seq_length
UpperCAmelCase_ : Optional[int] = self.model_tester.num_frames
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
UpperCAmelCase_ : int = len(lowerCamelCase_ )
# Check attention is always last and order is fine
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
self.assertEqual(out_len + 1 ,len(lowerCamelCase_ ) )
UpperCAmelCase_ : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
def A__ ( self: List[Any] ) -> Optional[int]:
def check_hidden_states_output(lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[Any] ):
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ : Dict = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[str] = True
check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : str = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
UpperCAmelCase_ : Any = np.load(_a )
return list(_a )
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: int ) -> Any:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : str = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.default_image_processor
UpperCAmelCase_ : int = prepare_video()
UpperCAmelCase_ : Any = image_processor(video[:8] ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Any = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : Union[str, Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
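# Hypothetical minimal inference sketch mirroring the integration test above
# (random uint8 frames stand in for a real 8-frame clip):
import numpy as np
import torch
from transformers import AutoImageProcessor, TimesformerForVideoClassification

name = "facebook/timesformer-base-finetuned-k400"
processor = AutoImageProcessor.from_pretrained(name)
model = TimesformerForVideoClassification.from_pretrained(name)
video = list(np.random.randint(0, 256, (8, 224, 224, 3), dtype=np.uint8))
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits   # shape (1, 400) for Kinetics-400
print(model.config.id2label[int(logits.argmax(-1))])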
| 345 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = AutoencoderKL
A__ : Optional[int] = "sample"
A__ : Tuple = 1E-2
@property
def A__ ( self: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Any = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
return {"sample": image}
@property
def A__ ( self: List[str] ) -> Tuple:
return (3, 32, 32)
@property
def A__ ( self: Optional[Any] ) -> Any:
return (3, 32, 32)
def A__ ( self: Any ) -> Tuple:
UpperCAmelCase_ : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
UpperCAmelCase_ : int = self.dummy_input
return init_dict, inputs_dict
def A__ ( self: Optional[Any] ) -> int:
pass
def A__ ( self: str ) -> Any:
pass
@unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" )
def A__ ( self: Union[str, Any] ) -> Dict:
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample
        # run the backward pass; for simplicity we backprop on the mean
        # difference to random targets rather than a real training loss
model.zero_grad()
UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample
        # run the backward pass; for simplicity we backprop on the mean
        # difference to random targets rather than a real training loss
model_a.zero_grad()
UpperCAmelCase_ : Dict = (out_a - labels).mean()
loss_a.backward()
        # compare the loss values and the parameter gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
UpperCAmelCase_ : Dict = dict(model.named_parameters() )
UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
def A__ ( self: Optional[Any] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 )
model.to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self: Optional[int] ) -> int:
UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
else:
UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : str = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
UpperCAmelCase_ : int = image.to(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample
UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
return F'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase_ ) for s in shape] )}.npy'''
def A__ ( self: Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: List[Any]=(4, 3, 512, 512) ,lowerCamelCase_: Optional[Any]=False ) -> Optional[int]:
UpperCAmelCase_ : Tuple = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ ,lowerCamelCase_ ) ) ).to(lowerCamelCase_ ).to(lowerCamelCase_ )
return image
def A__ ( self: List[Any] ,lowerCamelCase_: List[str]="CompVis/stable-diffusion-v1-4" ,lowerCamelCase_: Union[str, Any]=False ) -> Any:
UpperCAmelCase_ : Optional[Any] = """fp16""" if fpaa else None
UpperCAmelCase_ : str = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ : int = AutoencoderKL.from_pretrained(
lowerCamelCase_ ,subfolder="""vae""" ,torch_dtype=lowerCamelCase_ ,revision=lowerCamelCase_ ,)
model.to(lowerCamelCase_ ).eval()
return model
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=0 ) -> Optional[int]:
if torch_device == "mps":
return torch.manual_seed(lowerCamelCase_ )
return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple:
UpperCAmelCase_ : List[Any] = self.get_sd_vae_model()
UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model()
UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int:
UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.get_sd_vae_model()
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist
UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
| 345 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__snake_case )
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
A__ : ClassVar[Features] = Features({"audio": Audio()} )
A__ : ClassVar[Features] = Features({"transcription": Value("string" )} )
A__ : str = "audio"
A__ : str = "transcription"
def A__ ( self: int ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
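        # check that the dataset exposes the configured audio column, then return a copy of the template aligned to its feature type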
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] ,lowerCamelCase_ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
UpperCAmelCase_ : Any = copy.deepcopy(self )
UpperCAmelCase_ : Union[str, Any] = self.input_schema.copy()
UpperCAmelCase_ : Any = features[self.audio_column]
UpperCAmelCase_ : Union[str, Any] = input_schema
return task_template
@property
def A__ ( self: List[str] ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 345 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__snake_case )
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
A__ : ClassVar[Features] = Features({"audio": Audio()} )
A__ : ClassVar[Features] = Features({"transcription": Value("string" )} )
A__ : str = "audio"
A__ : str = "transcription"
def A__ ( self: int ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] ,lowerCamelCase_ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
UpperCAmelCase_ : Any = copy.deepcopy(self )
UpperCAmelCase_ : Union[str, Any] = self.input_schema.copy()
UpperCAmelCase_ : Any = features[self.audio_column]
UpperCAmelCase_ : Union[str, Any] = input_schema
return task_template
@property
def A__ ( self: List[str] ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 345 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : List[Any] = "sew-d"
def __init__( self: Any ,lowerCamelCase_: str=32 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Union[str, Any]=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Optional[Any]=3072 ,lowerCamelCase_: Optional[Any]=2 ,lowerCamelCase_: Dict=512 ,lowerCamelCase_: List[str]=256 ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: Optional[int]=("p2c", "c2p") ,lowerCamelCase_: Optional[Any]="layer_norm" ,lowerCamelCase_: str="gelu_python" ,lowerCamelCase_: Optional[Any]=0.1 ,lowerCamelCase_: Dict=0.1 ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: List[Any]=0.0_2 ,lowerCamelCase_: Any=1e-7 ,lowerCamelCase_: Optional[Any]=1e-5 ,lowerCamelCase_: List[Any]="group" ,lowerCamelCase_: List[str]="gelu" ,lowerCamelCase_: List[Any]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase_: Tuple=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase_: Optional[int]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase_: Optional[Any]=False ,lowerCamelCase_: List[str]=128 ,lowerCamelCase_: List[str]=16 ,lowerCamelCase_: Optional[Any]=True ,lowerCamelCase_: str=0.0_5 ,lowerCamelCase_: int=10 ,lowerCamelCase_: str=2 ,lowerCamelCase_: Any=0.0 ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: Any="mean" ,lowerCamelCase_: List[str]=False ,lowerCamelCase_: str=False ,lowerCamelCase_: List[Any]=256 ,lowerCamelCase_: Dict=0 ,lowerCamelCase_: str=1 ,lowerCamelCase_: Union[str, Any]=2 ,**lowerCamelCase_: Dict ,) -> Union[str, Any]:
super().__init__(**lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : List[Any] = feat_extract_norm
UpperCAmelCase_ : Optional[Any] = feat_extract_activation
UpperCAmelCase_ : int = list(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = list(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = list(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = conv_bias
UpperCAmelCase_ : List[Any] = num_conv_pos_embeddings
UpperCAmelCase_ : Union[str, Any] = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Tuple = squeeze_factor
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : Optional[int] = position_buckets
UpperCAmelCase_ : List[Any] = share_att_key
UpperCAmelCase_ : str = relative_attention
UpperCAmelCase_ : Optional[Any] = norm_rel_ebd
UpperCAmelCase_ : Union[str, Any] = list(lowerCamelCase_ )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : List[str] = activation_dropout
UpperCAmelCase_ : Tuple = feat_proj_dropout
UpperCAmelCase_ : Optional[int] = final_dropout
UpperCAmelCase_ : List[Any] = layer_norm_eps
UpperCAmelCase_ : List[Any] = feature_layer_norm_eps
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Dict = vocab_size
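        # the conv_dim / conv_stride / conv_kernel lists must each describe one entry per feature-extractor layer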
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[int] = mask_time_prob
UpperCAmelCase_ : Tuple = mask_time_length
UpperCAmelCase_ : str = mask_time_min_masks
UpperCAmelCase_ : List[str] = mask_feature_prob
UpperCAmelCase_ : int = mask_feature_length
UpperCAmelCase_ : int = mask_feature_min_masks
# ctc loss
UpperCAmelCase_ : List[Any] = ctc_loss_reduction
UpperCAmelCase_ : List[str] = ctc_zero_infinity
# sequence classification
UpperCAmelCase_ : Tuple = use_weighted_layer_sum
UpperCAmelCase_ : Any = classifier_proj_size
@property
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 345 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "layoutlmv3"
def __init__( self: str ,lowerCamelCase_: Any=50265 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: List[Any]=3072 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Tuple=512 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: int=1 ,lowerCamelCase_: int=0 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Dict=1024 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: Union[str, Any]=128 ,lowerCamelCase_: Tuple=64 ,lowerCamelCase_: Tuple=256 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: Dict=224 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Dict=None ,**lowerCamelCase_: str ,) -> List[Any]:
super().__init__(
vocab_size=lowerCamelCase_ ,hidden_size=lowerCamelCase_ ,num_hidden_layers=lowerCamelCase_ ,num_attention_heads=lowerCamelCase_ ,intermediate_size=lowerCamelCase_ ,hidden_act=lowerCamelCase_ ,hidden_dropout_prob=lowerCamelCase_ ,attention_probs_dropout_prob=lowerCamelCase_ ,max_position_embeddings=lowerCamelCase_ ,type_vocab_size=lowerCamelCase_ ,initializer_range=lowerCamelCase_ ,layer_norm_eps=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,)
UpperCAmelCase_ : List[Any] = max_ad_position_embeddings
UpperCAmelCase_ : Optional[int] = coordinate_size
UpperCAmelCase_ : Optional[int] = shape_size
UpperCAmelCase_ : Optional[Any] = has_relative_attention_bias
UpperCAmelCase_ : Optional[int] = rel_pos_bins
UpperCAmelCase_ : Union[str, Any] = max_rel_pos
UpperCAmelCase_ : Dict = has_spatial_attention_bias
UpperCAmelCase_ : Optional[int] = rel_ad_pos_bins
UpperCAmelCase_ : Tuple = max_rel_ad_pos
UpperCAmelCase_ : Union[str, Any] = text_embed
UpperCAmelCase_ : Optional[Any] = visual_embed
UpperCAmelCase_ : List[str] = input_size
UpperCAmelCase_ : str = num_channels
UpperCAmelCase_ : Optional[int] = patch_size
UpperCAmelCase_ : Tuple = classifier_dropout
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = version.parse("1.12" )
@property
def A__ ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def A__ ( self: Any ) -> float:
return 1e-5
@property
def A__ ( self: int ) -> int:
return 12
def A__ ( self: List[str] ,lowerCamelCase_: "ProcessorMixin" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional["TensorType"] = None ,lowerCamelCase_: int = 3 ,lowerCamelCase_: int = 40 ,lowerCamelCase_: int = 40 ,) -> Mapping[str, Any]:
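        # disable OCR on the image processor (upstream passes False here) so the dummy words and boxes built below are used as-is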
setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
UpperCAmelCase_ : int = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = dict(
processor(
lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) )
return inputs
| 345 | 1 |
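# Project Euler 75: count the perimeters up to the limit that are produced by exactly one
# integer-sided right triangle, generating primitive triples with Euclid's formula.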
from collections import defaultdict
from math import gcd
def solution( limit : int = 150_0000 ):
    '''simple docstring'''
    frequencies : defaultdict = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
| 345 |
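# Convert a fairseq XGLM checkpoint on disk into a Hugging Face XGLMForCausalLM and save it.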
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_( state_dict : dict ):
    '''simple docstring'''
    ignore_keys = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb : nn.Embedding ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk( checkpoint_path : str ):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    args = Namespace(**checkpoint["""cfg"""]["""model"""] )
    state_dict = checkpoint["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    state_dict = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config )
    missing_keys = model.load_state_dict(state_dict , strict=False )
    print(missing_keys )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 345 | 1 |
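# Bisection method: find a root of a continuous function that changes sign on [a, b].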
from collections.abc import Callable
def bisection( function : Callable[[float], float] , a : float , b : float ):
    '''simple docstring'''
    start : float = a
    end : float = b
    if function(a ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("""could not find root in given interval.""" )
    else:
        mid : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f( x : float ):
    '''simple docstring'''
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 345 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Dict = embed_dim
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : str = depths
UpperCAmelCase_ : int = num_heads
UpperCAmelCase_ : List[Any] = window_size
UpperCAmelCase_ : Union[str, Any] = mlp_ratio
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = drop_path_rate
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : List[Any] = use_absolute_embeddings
UpperCAmelCase_ : List[Any] = patch_norm
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = encoder_stride
UpperCAmelCase_ : Optional[int] = out_features
UpperCAmelCase_ : Optional[int] = out_indices
def A__ ( self: Union[str, Any] ) -> List[Any]:
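        # random pixel values, plus classification labels when use_labels is set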
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Tuple:
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A__ : Optional[Any] = False
A__ : Any = False
A__ : List[str] = False
A__ : Any = False
A__ : Any = False
def A__ ( self: List[str] ) -> Tuple:
UpperCAmelCase_ : Dict = FocalNetModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ )
def A__ ( self: List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: List[str] ) -> Union[str, Any]:
return
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
pass
def A__ ( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
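        # forward pass; verify hidden_states shapes and that reshaped_hidden_states flatten back to the same (num_patches, dim) layout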
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# FocalNet has a different seq_length
UpperCAmelCase_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : List[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
@slow
def A__ ( self: Optional[int] ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Optional[int] ) -> str:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,281 )
@require_torch
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A__ : int = FocalNetConfig
A__ : List[str] = False
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : str = FocalNetModelTester(self )
| 345 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class lowercase_ :
'''simple docstring'''
def __init__( self : Tuple , __UpperCAmelCase : list[tuple[float, float]] ) ->Optional[Any]:
"""simple docstring"""
a = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
a = len(__UpperCAmelCase ) - 1
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : float ) ->list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
a = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , __UpperCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__UpperCAmelCase ) , 5 ) == 1
return output_values
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : float ) ->tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
a = self.basis_function(__UpperCAmelCase )
a = 0.0
a = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __lowerCAmelCase ( self : int , __UpperCAmelCase : float = 0.01 ) ->Optional[int]:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
a = [] # x coordinates of points to plot
a = [] # y coordinates of points to plot
a = 0.0
while t <= 1:
a = self.bezier_curve_function(__UpperCAmelCase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
a = [i[0] for i in self.list_of_points]
a = [i[1] for i in self.list_of_points]
plt.plot(
__UpperCAmelCase , __UpperCAmelCase , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(__UpperCAmelCase , __UpperCAmelCase , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 0 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : List[str] = num_heads
UpperCAmelCase_ : int = window_size
UpperCAmelCase_ : List[str] = mlp_ratio
UpperCAmelCase_ : Tuple = qkv_bias
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : int = use_absolute_embeddings
UpperCAmelCase_ : Any = patch_norm
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[str] = encoder_stride
def A__ ( self: Any ) -> int:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Union[str, Any]:
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str:
UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: str ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
A__ : Optional[Any] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
A__ : List[Any] = False
A__ : Tuple = False
A__ : int = False
A__ : Union[str, Any] = False
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = SwinvaModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 )
def A__ ( self: Optional[int] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: Any ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A__ ( self: Tuple ) -> List[str]:
pass
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[Any]:
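        # check attention shapes when output_attentions is requested via kwargs, via config, and alongside hidden states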
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = True
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[Any] = outputs.attentions
UpperCAmelCase_ : List[str] = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Optional[Any] = config.window_size**2
UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ )
# Check attention is always last and order is fine
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
if hasattr(self.model_tester ,"""num_hidden_states_types""" ):
UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase_ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]:
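        # forward pass; verify hidden_states shapes and the flattened reshaped_hidden_states layout for the patch grid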
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[str] = outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# Swinv2 has a different seq_length
UpperCAmelCase_ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape
UpperCAmelCase_ : Optional[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = 3
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : List[str] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def A__ ( self: str ) -> Tuple:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Dict ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
lowerCamelCase_ )
UpperCAmelCase_ : Any = self.default_image_processor
UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
| 345 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : int , *__a : Dict , **__a : str ):
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
| 1 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase_ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase_ = {
'''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : int = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase_ : Dict = bs[:]
UpperCAmelCase_ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_a )
cs.append(2**8 + n )
n += 1
UpperCAmelCase_ : Any = [chr(_a ) for n in cs]
return dict(zip(_a , _a ) )
def lowerCamelCase_ ( _a : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = set()
UpperCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ : Optional[int] = char
return pairs
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any:
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : Any = errors # how to handle errors in decoding
UpperCAmelCase_ : int = bytes_to_unicode()
UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self: List[str] ) -> List[str]:
return len(self.encoder )
def A__ ( self: Any ) -> Union[str, Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : List[str] = 0
while i < len(lowerCamelCase_ ):
try:
UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ : Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ )
UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = word
return word
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]:
UpperCAmelCase_ : str = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
UpperCAmelCase_ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]:
return self.decoder.get(lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]:
UpperCAmelCase_ : str = """""".join(lowerCamelCase_ )
UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : List[Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : List[str] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCAmelCase_ : str = 0
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase_ : Tuple = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase_ : Dict = """ """ + text
return (text, kwargs)
def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[bool] = None ,) -> dict:
UpperCAmelCase_ : Optional[int] = super()._pad(
encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ : str = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
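# Standalone sketch of the global-attention padding rule in `_pad` above
# (hypothetical values, not produced by a real tokenizer): the mask is extended
# with -1, which LED reads as "plain local attention", never as "masked out".
if __name__ == "__main__":
    encoded = {
        "input_ids": [0, 713, 16, 2, 1, 1],     # already right-padded to length 6
        "global_attention_mask": [1, 0, 0, 0],  # still at the unpadded length 4
    }
    difference = len(encoded["input_ids"]) - len(encoded["global_attention_mask"])
    encoded["global_attention_mask"] += [-1] * difference
    assert len(encoded["global_attention_mask"]) == len(encoded["input_ids"])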
| 345 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase (PretrainedConfig ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = """vit_mae"""
def __init__(self : Any , UpperCamelCase : List[str]=768 , UpperCamelCase : Dict=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : Any=3072 , UpperCamelCase : Dict="gelu" , UpperCamelCase : Tuple=0.0 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : List[str]=0.02 , UpperCamelCase : str=1E-12 , UpperCamelCase : Optional[Any]=224 , UpperCamelCase : Tuple=16 , UpperCamelCase : str=3 , UpperCamelCase : int=True , UpperCamelCase : List[Any]=16 , UpperCamelCase : str=512 , UpperCamelCase : int=8 , UpperCamelCase : Optional[int]=2048 , UpperCamelCase : List[str]=0.75 , UpperCamelCase : Union[str, Any]=False , **UpperCamelCase : int , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = decoder_num_attention_heads
lowercase__ = decoder_hidden_size
lowercase__ = decoder_num_hidden_layers
lowercase__ = decoder_intermediate_size
lowercase__ = mask_ratio
lowercase__ = norm_pix_loss
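# Usage sketch (assumption: ViTMAEConfig is the upstream name of the mangled
# class above, and standard PretrainedConfig behaviour applies); the defaults
# reproduce the facebook/vit-mae-base architecture.
if __name__ == "__main__":
    from transformers import ViTMAEConfig

    config = ViTMAEConfig()                                       # vit-mae-base defaults
    small = ViTMAEConfig(hidden_size=384, num_hidden_layers=6, mask_ratio=0.9)
    print(small.model_type, small.hidden_size, small.mask_ratio)  # vit_mae 384 0.9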
| 2 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self: Union[str, Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self: List[str] ) -> Dict:
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self: List[Any] ) -> Optional[Any]:
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
        return model
@property
    def dummy_vae( self: str ) -> List[str]:
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
        return model
@property
    def dummy_text_encoder( self: Optional[int] ) -> int:
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        return CLIPTextModel(text_encoder_config )
@property
    def dummy_extractor( self: Tuple ) -> Tuple:
        def extract(*args: Optional[Any] ,**kwargs: str ):
            class Out:
                '''simple docstring'''
                def __init__( self: List[Any] ) -> Optional[Any]:
                    self.pixel_values = torch.ones([0] )
                def to( self: List[Any] ,device: str ) -> "Out":
                    self.pixel_values.to(device )
                    return self
            return Out()
        return extract
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.dummy_cond_unet
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: str ) -> Dict:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Tuple = self.dummy_cond_unet
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCAmelCase_ : Optional[Any] = unet.half()
UpperCAmelCase_ : Optional[int] = vae.half()
UpperCAmelCase_ : int = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self: Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCAmelCase_ : Optional[int] = 4003660346
UpperCAmelCase_ : int = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCAmelCase_ : List[Any] = 2734971755
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCAmelCase_ : Optional[Any] = 1044355234
UpperCAmelCase_ : List[str] = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 345 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but this one also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def __lowerCAmelCase ( self , predictions , references , return_pvalue=False ) -> Union[str, Any]:
        """simple docstring"""
        if return_pvalue:
            results = pearsonr(predictions , references )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(predictions , references )[0] )}
| 3 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
'''simple docstring'''
    def __init__( self: Optional[int] ,parent: Union[str, Any] ,batch_size: Tuple=13 ,seq_length: int=7 ,is_training: Union[str, Any]=True ,use_input_mask: Dict=True ,use_token_type_ids: str=True ,use_labels: Tuple=True ,vocab_size: int=99 ,hidden_size: List[str]=64 ,embedding_size: Tuple=32 ,num_hidden_layers: List[str]=5 ,num_attention_heads: str=4 ,intermediate_size: str=37 ,hidden_act: Union[str, Any]="gelu" ,hidden_dropout_prob: Union[str, Any]=0.1 ,attention_probs_dropout_prob: str=0.1 ,max_position_embeddings: List[str]=512 ,type_vocab_size: Dict=16 ,type_sequence_label_size: List[str]=2 ,initializer_range: List[str]=0.0_2 ,num_labels: Optional[Any]=3 ,num_choices: Union[str, Any]=4 ,scope: str=None ,) -> List[str]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self: Any ) -> Optional[int]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self: Any ) -> Dict:
        return MobileBertConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int:
UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int:
UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def A__ ( self: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> str:
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : Union[str, Any] = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Any:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_choices
UpperCAmelCase_ : Tuple = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self: List[str] ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self: Dict ,inputs_dict: Tuple ,model_class: Tuple ,return_labels: int=False ) -> Union[str, Any]:
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
    def setUp( self: List[str] ) -> Any:
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=MobileBertConfig ,hidden_size=37 )
    def test_config( self: Optional[Any] ) -> List[Any]:
        self.config_tester.run_common_tests()
    def test_model( self: List[str] ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self: Optional[int] ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self: Optional[Any] ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self: List[Any] ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self: Optional[Any] ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self: Optional[int] ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self: Union[str, Any] ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self: Any ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor(_a : Union[str, Any] ):
    '''simple docstring'''
    return torch.tensor(
        _a , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self: List[Any] ) -> str:
        model = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ] ,device=torch_device ,)
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
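# Standalone sketch of the ratio-based tolerance used above: with activations
# spanning roughly 1e0..1e8, an absolute atol is meaningless, so the test bounds
# expected/actual within [1 - TOLERANCE, 1 + TOLERANCE] instead.
if __name__ == "__main__":
    expected = torch.tensor([1.0e8, 2.5e0, -3.0e4])
    actual = expected * (1 + 5e-4)  # simulate a 0.05% relative error
    ratio = expected / actual
    assert torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE)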
| 345 | 0 |
'''simple docstring'''
def binary_insertion_sort( collection : list ):
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(binary_insertion_sort(unsorted))
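    # Quick self-checks, sketched here because the interactive prompt above is
    # the only exercise of the function. Binary search cuts comparisons to
    # O(n log n), but element shifts keep the worst case at O(n^2).
    assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
    assert binary_insertion_sort([]) == []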
| 4 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self: str ) -> int:
        self.checkpoint = """ylacombe/bark-small"""
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = """en_speaker_1"""
        self.input_string = """This is a test string"""
        self.speaker_embeddings_dict_path = """speaker_embeddings_path.json"""
        self.speaker_embeddings_directory = """speaker_embeddings"""
    def get_tokenizer( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]:
        return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ )
    def tearDown( self: str ) -> Union[str, Any]:
        shutil.rmtree(self.tmpdirname )
def A__ ( self: List[Any] ) -> int:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def A__ ( self: List[Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
UpperCAmelCase_ : Optional[int] = 35
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Dict = 8
UpperCAmelCase_ : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : str = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,"""file.npz""" )
np.savez(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : int = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Union[str, Any] = processor(text=self.input_string ,voice_preset=self.voice_preset )
def A__ ( self: Dict ) -> Tuple:
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Dict = BarkProcessor(tokenizer=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string )
UpperCAmelCase_ : str = tokenizer(
self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
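# Hypothetical stand-alone illustration of the voice-preset layout the tests
# above build (seq_len=35, 2 coarse codebooks, 8 codebooks in total); presets
# round-trip through .npz files exactly as exercised in the file-loading test.
if __name__ == "__main__":
    preset = {
        "semantic_prompt": np.ones(35),
        "coarse_prompt": np.ones((2, 35)),
        "fine_prompt": np.ones((8, 35)),
    }
    np.savez("file.npz", **preset)  # writes to the current directory
    loaded = np.load("file.npz")
    assert loaded["fine_prompt"].shape == (8, 35)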
| 345 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
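# Sketch of the lazy-import behaviour this indirection buys (assumption:
# standard _LazyModule semantics): importing the package is cheap, and each
# submodule is materialized only on first attribute access.
if __name__ == "__main__":
    import transformers.onnx as onnx_pkg

    cfg_cls = onnx_pkg.OnnxConfig  # the ".config" submodule is imported only now
    print(cfg_cls.__module__)      # transformers.onnx.config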
| 5 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : str = -1
UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : Optional[int] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Dict ) -> Optional[Any]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] )
UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
UpperCAmelCase_ : int = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :]
UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : List[str] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: str ) -> str:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Any = -1
UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = -1
UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 )
UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
UpperCAmelCase_ : Union[str, Any] = """"""
for new_text in streamer:
streamer_text += new_text
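# Minimal end-to-end sketch of the iterator-streamer pattern these tests
# exercise: generation runs in a background thread while the main thread
# consumes decoded text chunks as they arrive.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    text = "".join(chunk for chunk in streamer)  # blocks until generation finishes
    thread.join()
    print(text)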
| 345 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
A : List[Any] = logging.get_logger(__name__)
A : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A : Dict = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
            'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
            'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
            'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Any = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Tuple = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Optional[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
A : Tuple = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
A : List[Any] = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
A : Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
A : str = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
A : Union[str, Any] = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __A( a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRContextEncoderTokenizer
class __A( a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRQuestionEncoderTokenizer
A : Optional[Any] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
A : Optional[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
A : Tuple = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Return:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(a )
class __A:
def __call__( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = False , _snake_case = False , _snake_case = None , _snake_case = None , _snake_case = None , **_snake_case , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
elif titles is None or texts is None:
__a = titles if texts is None else texts
return super().__call__(
_snake_case , _snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
__a = titles if not isinstance(_snake_case , _snake_case ) else [titles]
__a = texts if not isinstance(_snake_case , _snake_case ) else [texts]
__a = len(_snake_case )
__a = questions if not isinstance(_snake_case , _snake_case ) else [questions] * n_passages
assert len(_snake_case ) == len(
            _snake_case ), F"""There should be as many titles as texts but got {len(_snake_case )} titles and {len(_snake_case )} texts."""
__a = super().__call__(_snake_case , _snake_case , padding=_snake_case , truncation=_snake_case )['''input_ids''']
__a = super().__call__(_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case )['''input_ids''']
__a = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_snake_case , _snake_case )
]
}
if return_attention_mask is not False:
__a = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__a = attention_mask
return self.pad(_snake_case , padding=_snake_case , max_length=_snake_case , return_tensors=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 16 , _snake_case = 64 , _snake_case = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
__a = reader_input['''input_ids''']
__a , __a , __a = reader_output[:3]
__a = len(_snake_case )
__a = sorted(range(_snake_case ) , reverse=_snake_case , key=relevance_logits.__getitem__ )
__a = []
for doc_id in sorted_docs:
__a = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__a = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__a = sequence_ids.index(self.pad_token_id )
else:
__a = len(_snake_case )
__a = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_snake_case , top_spans=_snake_case , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_snake_case , start_index=_snake_case , end_index=_snake_case , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_snake_case ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
__a = []
for start_index, start_score in enumerate(_snake_case ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__a = sorted(_snake_case , key=lambda _snake_case : x[1] , reverse=_snake_case )
__a = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
__a = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_snake_case ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a )
class __A( a , a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = READER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = READER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = ['''input_ids''', '''attention_mask''']
    snake_case_ = DPRReaderTokenizer
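A usage sketch of the reader tokenizer and span decoding, written against the classes as published in transformers (the published facebook checkpoint is assumed reachable): `__call__` packs [CLS] question [SEP] title [SEP] text rows, and `decode_best_spans` turns the reader logits back into ranked answer spans.

import torch
from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

encoded = tokenizer(
    questions="What is love?",
    titles="Haddaway",
    texts="'What Is Love' is a song recorded by the artist Haddaway",
    return_tensors="pt",
)
outputs = model(**encoded)
spans = tokenizer.decode_best_spans(encoded, outputs, num_spans=1)
print(spans[0].text, spans[0].relevance_score)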
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
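A plain sampling sketch with the same pipeline the slow test exercises, assuming the CompVis checkpoint can be downloaded; more inference steps than the test, since the test only checks determinism, not image quality.

import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
generator = torch.manual_seed(0)
image = pipe(generator=generator, num_inference_steps=20, output_type="pil").images[0]
image.save("ldm_sample.png")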
def interpolation_search(sorted_collection, item):
    """Search for `item` in `sorted_collection` iteratively; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of `interpolation_search` over `sorted_collection[left:right + 1]`."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError unless `collection` is sorted in ascending order."""
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"""{target} found at positions: {result}""")
    else:
        print("Not found")
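A quick sanity check on the two entry points above; the recursive variant needs explicit bounds, and both land on index 3 for the value 45 in this list (first probe hits index 2, a second probe converges).

# >>> data = [10, 30, 40, 45, 50, 66, 77, 93]
# >>> interpolation_search(data, 45)
# 3
# >>> interpolation_search_by_recursion(data, 45, 0, len(data) - 1)
# 3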
def topological_sort(graph):
    """Kahn's algorithm: print a topological ordering of `graph`, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("""Cycle exists""")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
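For contrast, a cyclic input trips the `cnt != len(graph)` guard instead of producing an ordering, since no vertex ever reaches indegree 0:

# topological_sort({0: [1], 1: [2], 2: [0]})  # prints: Cycle exists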
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
    doctest.testmod()
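A quick worked example of f = 1 / (2π√(LC)): with L = 10 mH and C = 100 nF the function above should report roughly 5.03 kHz.

label, frequency = resonant_frequency(10e-3, 100e-9)
print(label, round(frequency, 1))  # Resonant frequency 5032.9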
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
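A short sketch of instantiating the config defined above; note that hidden_size is derived from embed_dim and the number of stages rather than passed in.

config = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
print(config.hidden_size)  # 96 * 2**3 == 768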
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = DebertaTokenizer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[int] = DebertaTokenizerFast
def __magic_name__( self :List[str] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
__SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''unk_token''': '''[UNK]'''}
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase__ ) )
def __magic_name__( self :Any , **lowerCAmelCase__ :str ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :int , lowerCAmelCase__ :int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = '''lower newer'''
__SCREAMING_SNAKE_CASE : int = '''lower newer'''
return input_text, output_text
def __magic_name__( self :str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = '''lower newer'''
__SCREAMING_SNAKE_CASE : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
__SCREAMING_SNAKE_CASE : Optional[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : List[str] = tokenizer('''Hello''' , '''World''' )
__SCREAMING_SNAKE_CASE : Dict = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , lowerCAmelCase__ )
@slow
def __magic_name__( self :Optional[int] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__SCREAMING_SNAKE_CASE : int = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__SCREAMING_SNAKE_CASE : Dict = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = [tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) for seq in encoding['''input_ids''']]
# fmt: off
__SCREAMING_SNAKE_CASE : List[Any] = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__SCREAMING_SNAKE_CASE : List[str] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , lowerCAmelCase__ )
for expected, decoded in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
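A minimal round trip with the published checkpoint the slow tests rely on, rather than the toy vocab built in setUp (network access assumed):

from transformers import DebertaTokenizer

tok = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
ids = tok.encode("lower newer")                    # adds the special token ids around the BPE tokens
print(tok.decode(ids, skip_special_tokens=True))   # "lower newer"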
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> str:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : List[str] = mock.Mock()
UpperCAmelCase_ : List[Any] = 500
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self: str ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : str = mock.Mock()
UpperCAmelCase_ : Optional[int] = 500
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : List[Any] = {}
# Download this model to make sure it's in the cache.
        UpperCAmelCase_ : Optional[int] = GPT2TokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
            UpperCAmelCase_ : Any = GPT2TokenizerFast.from_pretrained("""gpt2""" )
# This check we did call the fake head request
mock_head.assert_called()
def A__ ( self: str ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
try:
UpperCAmelCase_ : Any = tempfile.mktemp()
with open(lowerCamelCase_ ,"""wb""" ) as f:
                http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ )
finally:
os.remove(lowerCamelCase_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" ,"""wb""" ) as f:
                http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def A__ ( self: List[str] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
        UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls: Dict ) -> Optional[int]:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def A__ ( cls: Optional[Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def A__ ( self: Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def A__ ( self: Optional[int] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def A__ ( self: Optional[int] ) -> Optional[Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Any = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def A__ ( self: Tuple ) -> Optional[int]:
UpperCAmelCase_ : str = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> List[str]:
UpperCAmelCase_ : int = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )
def A__ ( self: List[Any] ) -> Any:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCAmelCase_ : Tuple = Trie()
UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
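A standalone sketch with the Trie imported above: splitting prefers the longest added token, which is why the extra_id_100 test does not stop at extra_id_1.

trie = Trie()
trie.add("extra_id_1")
trie.add("extra_id_100")
print(trie.split("a extra_id_100 b"))  # ['a ', 'extra_id_100', ' b']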
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__A = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite `layers.0`-style PyTorch key segments into `layers_0` for Flax."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key to its Flax counterpart and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a flat PyTorch state dict into the nested Flax params of `flax_model`."""
    # Step 1: Convert PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
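A hedged sketch of driving the converter above; the checkpoint path and Flax model class below are placeholders, and any diffusers Flax module exposing init_weights should work.

# import torch
# from diffusers import FlaxUNet2DConditionModel   # placeholder choice of Flax module
#
# pt_state_dict = torch.load("unet/diffusion_pytorch_model.bin", map_location="cpu")  # hypothetical path
# flax_model = FlaxUNet2DConditionModel(**unet_config)  # unet_config assumed to be available
# flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)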
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
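These placeholder classes exist only so that importing the library without flax fails lazily: instantiating any of them (or calling its classmethods) goes through requires_backends. A sketch of the observable behavior, with a hypothetical class name:

# Without flax installed:
# obj = SomeDummyFlaxClass()   # hypothetical name; any class above behaves the same
# -> ImportError telling you to install the missing "flax" backend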
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : str ):
_A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 )
return np.sum(outputs == labels )
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ):
with open(UpperCamelCase__ , encoding="utf_8" ) as f:
_A : str = csv.reader(UpperCamelCase__ )
_A : List[Any] = []
next(UpperCamelCase__ ) # skip the first line
for line in tqdm(UpperCamelCase__ ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
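# Sketch of the row layout the loader above assumes (ROCStories cloze-test CSV):
# line[1:5] -> the four story sentences (joined into one string),
# line[5] and line[6] -> the two candidate endings,
# line[-1] -> 1-based index of the correct ending, shifted to 0-based here.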
def _UpperCAmelCase (UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
_A : int = []
for dataset in encoded_datasets:
_A : Tuple = len(UpperCamelCase__ )
_A : List[str] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
_A : Optional[int] = np.zeros((n_batch, 2) , dtype=np.intaa )
_A : Any = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
_A : List[Any] = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(UpperCamelCase__ ):
_A : Dict = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_A : List[str] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_A : int = with_conta
_A : str = with_conta
_A : int = len(UpperCamelCase__ ) - 1
_A : str = len(UpperCamelCase__ ) - 1
_A : List[Any] = with_conta
_A : str = with_conta
_A : Union[str, Any] = mc_label
_A : int = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(UpperCamelCase__ ) for t in all_inputs ) )
return tensor_datasets
def _UpperCAmelCase ():
_A : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=UpperCamelCase__ , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=UpperCamelCase__ , default="" )
parser.add_argument("--eval_dataset" , type=UpperCamelCase__ , default="" )
parser.add_argument("--seed" , type=UpperCamelCase__ , default=42 )
parser.add_argument("--num_train_epochs" , type=UpperCamelCase__ , default=3 )
parser.add_argument("--train_batch_size" , type=UpperCamelCase__ , default=8 )
parser.add_argument("--eval_batch_size" , type=UpperCamelCase__ , default=16 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=UpperCamelCase__ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=UpperCamelCase__ , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=UpperCamelCase__ , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=UpperCamelCase__ , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=UpperCamelCase__ , default=6.25E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=UpperCamelCase__ , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=UpperCamelCase__ , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=UpperCamelCase__ , default=0.01 )
parser.add_argument("--lm_coef" , type=UpperCamelCase__ , default=0.9 )
parser.add_argument("--n_valid" , type=UpperCamelCase__ , default=374 )
parser.add_argument("--server_ip" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." )
_A : Optional[int] = parser.parse_args()
print(UpperCamelCase__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_A : List[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_A : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(UpperCamelCase__ , UpperCamelCase__ ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_A : Tuple = ["_start_", "_delimiter_", "_classify_"]
_A : Tuple = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(UpperCamelCase__ )
_A : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
_A : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
model.to(UpperCamelCase__ )
# Load and encode the datasets
def tokenize_and_encode(UpperCamelCase__ : int ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(UpperCamelCase__ ) )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return obj
return [tokenize_and_encode(UpperCamelCase__ ) for o in obj]
logger.info("Encoding dataset..." )
_A : Any = load_rocstories_dataset(args.train_dataset )
_A : Union[str, Any] = load_rocstories_dataset(args.eval_dataset )
_A : List[str] = (train_dataset, eval_dataset)
_A : int = tokenize_and_encode(UpperCamelCase__ )
# Compute the max input length for the Transformer
_A : str = model.config.n_positions // 2 - 2
    _A : Any = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
_A : Dict = min(UpperCamelCase__ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_A : Dict = pre_process_datasets(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ )
_A , _A : List[Any] = tensor_datasets[0], tensor_datasets[1]
_A : Dict = TensorDataset(*UpperCamelCase__ )
_A : Optional[int] = RandomSampler(UpperCamelCase__ )
_A : int = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.train_batch_size )
_A : int = TensorDataset(*UpperCamelCase__ )
_A : List[Any] = SequentialSampler(UpperCamelCase__ )
_A : Optional[Any] = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_A : Tuple = args.max_steps
_A : Tuple = args.max_steps // (len(UpperCamelCase__ ) // args.gradient_accumulation_steps) + 1
else:
_A : Any = len(UpperCamelCase__ ) // args.gradient_accumulation_steps * args.num_train_epochs
_A : List[str] = list(model.named_parameters() )
_A : List[Any] = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
_A : Dict = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
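        # (added note) biases and LayerNorm parameters are excluded from weight decay; all other
        # parameters use args.weight_decay -- the standard AdamW decoupled-decay grouping.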
_A : Union[str, Any] = AdamW(UpperCamelCase__ , lr=args.learning_rate , eps=args.adam_epsilon )
_A : List[Any] = get_linear_schedule_with_warmup(
UpperCamelCase__ , num_warmup_steps=args.warmup_steps , num_training_steps=UpperCamelCase__ )
if args.do_train:
_A , _A , _A : Union[str, Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
_A : Tuple = 0
_A : str = 0
_A : Dict = tqdm(UpperCamelCase__ , desc="Training" )
for step, batch in enumerate(UpperCamelCase__ ):
_A : List[Any] = tuple(t.to(UpperCamelCase__ ) for t in batch )
_A , _A , _A , _A : List[str] = batch
_A : int = model(UpperCamelCase__ , mc_token_ids=UpperCamelCase__ , lm_labels=UpperCamelCase__ , mc_labels=UpperCamelCase__ )
_A : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_A : Any = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_A : Optional[Any] = "Training loss: {:.2e} lr: {:.2e}".format(UpperCamelCase__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_A : List[Any] = model.module if hasattr(UpperCamelCase__ , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_A : List[str] = os.path.join(args.output_dir , UpperCamelCase__ )
_A : Dict = os.path.join(args.output_dir , UpperCamelCase__ )
torch.save(model_to_save.state_dict() , UpperCamelCase__ )
model_to_save.config.to_json_file(UpperCamelCase__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_A : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_A : int = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(UpperCamelCase__ )
if args.do_eval:
model.eval()
_A , _A : str = 0, 0
_A , _A : Any = 0, 0
for batch in tqdm(UpperCamelCase__ , desc="Evaluating" ):
_A : Union[str, Any] = tuple(t.to(UpperCamelCase__ ) for t in batch )
_A , _A , _A , _A : Any = batch
with torch.no_grad():
_A , _A , _A , _A : Tuple = model(
UpperCamelCase__ , mc_token_ids=UpperCamelCase__ , lm_labels=UpperCamelCase__ , mc_labels=UpperCamelCase__ )
_A : List[Any] = mc_logits.detach().cpu().numpy()
_A : Optional[Any] = mc_labels.to("cpu" ).numpy()
_A : Union[str, Any] = accuracy(UpperCamelCase__ , UpperCamelCase__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_A : Any = eval_loss / nb_eval_steps
_A : int = eval_accuracy / nb_eval_examples
_A : Dict = tr_loss / nb_tr_steps if args.do_train else None
_A : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
_A : Union[str, Any] = os.path.join(args.output_dir , "eval_results.txt" )
with open(UpperCamelCase__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , UpperCamelCase__ , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 11 |
import random
from typing import Any
def fisher_yates_shuffle(_a : list ):
    '''simple docstring'''
    # walk backwards through the list, swapping each element with a uniformly random
    # position at or before it -- the unbiased Fisher-Yates shuffle
    for i in range(len(_a ) - 1 , 0 , -1 ):
        UpperCAmelCase_ : int = random.randint(0 , i )
        _a[i], _a[UpperCAmelCase_] = _a[UpperCAmelCase_], _a[i]
    return _a
if __name__ == "__main__":
UpperCamelCase_ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase_ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
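# Note (added): drawing the swap index uniformly from [0, i] at each backward step makes every
# permutation equally likely; swapping two independently random indices per pass does not.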
| 345 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCAmelCase_['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], UpperCAmelCase_, module_spec=__spec__)
| 12 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
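# (added note) Each down block above returns its intermediate hidden states alongside the final
# output; the up blocks below pop them from `res_hidden_states_tuple` and concatenate them back
# in along the channel axis as UNet skip connections.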
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
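# A minimal smoke-test sketch (added; hypothetical -- it assumes the cross-attention down block
# above is exposed as `FlaxCrossAttnDownBlock2D`, its name in diffusers, and the tensor shapes
# are illustrative only):
#
#     import jax
#     import jax.numpy as jnp
#
#     block = FlaxCrossAttnDownBlock2D(in_channels=32, out_channels=32, num_attention_heads=4)
#     sample = jnp.zeros((1, 8, 8, 32))     # NHWC, as Flax convolutions expect
#     temb = jnp.zeros((1, 128))            # timestep embedding
#     context = jnp.zeros((1, 77, 768))     # text-encoder hidden states
#     variables = block.init(jax.random.PRNGKey(0), sample, temb, context)
#     hidden, skips = block.apply(variables, sample, temb, context)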
| 345 | 0 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Any = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: List[Any] = 8
# DPR tok
SCREAMING_SNAKE_CASE_: str = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
SCREAMING_SNAKE_CASE_: Tuple = os.path.join(self.tmpdirname , "dpr_tokenizer")
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = os.path.join(lowerCAmelCase__ , DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
SCREAMING_SNAKE_CASE_: Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
SCREAMING_SNAKE_CASE_: Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: List[str] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
SCREAMING_SNAKE_CASE_: Optional[int] = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE_: Optional[Any] = os.path.join(self.tmpdirname , "bart_tokenizer")
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Tuple = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(lowerCAmelCase__) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(lowerCAmelCase__))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer"))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer"))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Union[str, Any] = os.path.join(self.tmpdirname , "rag_tokenizer")
SCREAMING_SNAKE_CASE_: Tuple = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict())
SCREAMING_SNAKE_CASE_: Union[str, Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer())
rag_config.save_pretrained(lowerCAmelCase__)
rag_tokenizer.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = RagTokenizer.from_pretrained(lowerCAmelCase__ , config=lowerCAmelCase__)
self.assertIsInstance(new_rag_tokenizer.question_encoder , lowerCAmelCase__)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator , lowerCAmelCase__)
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab())
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: int = RagTokenizer.from_pretrained("facebook/rag-token-nq")
SCREAMING_SNAKE_CASE_: str = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
SCREAMING_SNAKE_CASE_: List[str] = tokenizer(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: List[str] = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
SCREAMING_SNAKE_CASE_: Any = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
SCREAMING_SNAKE_CASE_: Tuple = tokenizer(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
| 13 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]:
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : Optional[int] = conva_get[:2]
UpperCAmelCase_ : List[Any] = conva_get[2]
UpperCAmelCase_ : str = size_pa
UpperCAmelCase_ : Optional[int] = rate_w
UpperCAmelCase_ : Dict = rate_t
UpperCAmelCase_ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
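        # (added note) convolution kernels start uniform in (-0.5, 0.5); all thresholds start uniform in (-1, 1)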
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# modify model parameter
UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : int = model_dic.get("""wkj""" )
UpperCAmelCase_ : int = model_dic.get("""vji""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple:
        return 1 / (1 + np.exp(-1 * lowerCamelCase_ ))
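    # (added note) sig is the logistic sigmoid; its derivative sig(x) * (1 - sig(x)) is what the
    # training loop below computes via np.multiply(out, (1 - out)).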
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any:
# convolution process
UpperCAmelCase_ : Optional[Any] = convs[0]
UpperCAmelCase_ : int = convs[1]
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Dict = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
# calculate the feature map of every single kernel, and saved as list of matrix
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
# expanding the data slice to One dimenssion
UpperCAmelCase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
UpperCAmelCase_ : Any = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[int] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
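    # (added note) e.g. a 4x4 feature map pooled with size_pooling=2 becomes 2x2: "average_pool"
    # keeps each window's mean, "max_pooling" keeps its maximum.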
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
# expanding three dimension data to one dimension list
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Tuple = np.shape(data[i] )
UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
# expanding matrix to one dimension list
UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = pd_pool[
i_pool
]
UpperCAmelCase_ : List[str] = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]:
# model traning
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = data_bp_input
UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[str] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[Any] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : int = rp + 1
UpperCAmelCase_ : Any = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
# model predict
UpperCAmelCase_ : Union[str, Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : str = data_bp_input
UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
# return the data of image after convoluting process so we can check it out
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 345 | 0 |
from __future__ import annotations
def median_of_two_arrays(numsa: list[float] , numsb: list[float] ) -> float:
    """simple docstring"""
    all_numbers = sorted(numsa + numsb )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
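# illustrative checks (added, not from the original):
#     median_of_two_arrays([1.0, 3.0], [2.0])      -> 2.0
#     median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) -> 2.5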
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input("""Enter the elements of first array: """).split()]
    array_b = [float(x) for x in input("""Enter the elements of second array: """).split()]
    print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_b)}''')
| 14 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Optional[Any] = CTRLTokenizer
A__ : Optional[Any] = False
A__ : str = False
def A__ ( self: Optional[int] ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: int ,lowerCamelCase_: int ) -> str:
UpperCAmelCase_ : List[str] = """adapt react readapt apt"""
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
return input_text, output_text
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
| 345 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :str = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "time_series_transformer"
snake_case_ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Tuple ,A : Optional[int] = None ,A : Optional[int] = None ,A : str = "student_t" ,A : str = "nll" ,A : int = 1 ,A : List[int] = [1, 2, 3, 4, 5, 6, 7] ,A : Optional[Union[str, bool]] = "mean" ,A : int = 0 ,A : int = 0 ,A : int = 0 ,A : int = 0 ,A : Optional[List[int]] = None ,A : Optional[List[int]] = None ,A : int = 32 ,A : int = 32 ,A : int = 2 ,A : int = 2 ,A : int = 2 ,A : int = 2 ,A : bool = True ,A : str = "gelu" ,A : int = 64 ,A : float = 0.1 ,A : float = 0.1 ,A : float = 0.1 ,A : float = 0.1 ,A : float = 0.1 ,A : int = 1_00 ,A : float = 0.02 ,A : Union[str, Any]=True ,**A : Optional[int] ,):
# time series specific configuration
__A = prediction_length
__A = context_length or prediction_length
__A = distribution_output
__A = loss
__A = input_size
__A = num_time_features
__A = lags_sequence
__A = scaling
__A = num_dynamic_real_features
__A = num_static_real_features
__A = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(A ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
__A = cardinality
else:
__A = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(A ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
__A = embedding_dimension
else:
__A = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality]
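            # (added note) default heuristic: a feature with cardinality 30 gets min(50, 31 // 2) = 15 dims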
__A = num_parallel_samples
# Transformer architecture configuration
__A = input_size * len(A ) + self._number_of_features
__A = d_model
__A = encoder_attention_heads
__A = decoder_attention_heads
__A = encoder_ffn_dim
__A = decoder_ffn_dim
__A = encoder_layers
__A = decoder_layers
__A = dropout
__A = attention_dropout
__A = activation_dropout
__A = encoder_layerdrop
__A = decoder_layerdrop
__A = activation_function
__A = init_std
__A = use_cache
super().__init__(is_encoder_decoder=A ,**A )
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 15 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = "ernie_m"
A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout
UpperCAmelCase_ : str = is_decoder
UpperCAmelCase_ : List[str] = act_dropout
| 345 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowerCAmelCase_['modeling_xlm_roberta_xl'] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], lowerCAmelCase_)
| 16 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def split_text(text : str , n : int = 100 , character : str = " " ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = text.split(character )
    return [character.join(UpperCAmelCase_[i : i + n] ).strip() for i in range(0 , len(UpperCAmelCase_ ) , n )]
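# illustrative (added): with n=2 and character=" ", "a b c d e" -> ["a b", "c d", "e"] --
# passages of at most n words each, joined back with the split character.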
def lowerCamelCase_ ( _a : dict ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(_a ):
titles.append(title if title is not None else """""" )
texts.append(_a )
return {"title": titles, "text": texts}
def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ):
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCAmelCase_ : Optional[int] = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a )
UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCAmelCase_ : Any = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
UpperCAmelCase_ : List[str] = dataset.map(
partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , )
# And finally save your dataset
UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(_a )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
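    # (added note) index_hnsw_args.d is the embedding dimensionality (768 by default, matching DPR)
    # and index_hnsw_args.m the number of bi-directional HNSW links per node; the inner-product
    # metric matches DPR's dot-product retrieval score.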
dataset.add_faiss_index("""embeddings""" , custom_index=_a )
# And save the index
UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(_a )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class _snake_case :
'''simple docstring'''
A__ : str = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
A__ : Optional[str] = field(
default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
A__ : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
A__ : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
A__ : Optional[str] = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : Optional[int] = field(
default=__snake_case , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
A__ : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
A__ : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 345 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "rwkv"
__UpperCAmelCase : List[Any] = {"max_position_embeddings": "context_length"}
def __init__( self : Optional[Any], UpperCAmelCase__ : Any=5_0_2_7_7, UpperCAmelCase__ : Union[str, Any]=1_0_2_4, UpperCAmelCase__ : int=4_0_9_6, UpperCAmelCase__ : str=3_2, UpperCAmelCase__ : int=None, UpperCAmelCase__ : Optional[int]=None, UpperCAmelCase__ : Optional[int]=1E-5, UpperCAmelCase__ : str=0, UpperCAmelCase__ : Optional[int]=0, UpperCAmelCase__ : Optional[int]=6, UpperCAmelCase__ : Any=False, UpperCAmelCase__ : Any=True, **UpperCAmelCase__ : int, ):
__lowercase = vocab_size
__lowercase = context_length
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = attention_hidden_size if attention_hidden_size is not None else hidden_size
__lowercase = intermediate_size if intermediate_size is not None else 4 * hidden_size
__lowercase = layer_norm_epsilon
__lowercase = rescale_every
__lowercase = use_cache
__lowercase = bos_token_id
__lowercase = eos_token_id
super().__init__(
tie_word_embeddings=UpperCAmelCase__, bos_token_id=UpperCAmelCase__, eos_token_id=UpperCAmelCase__, **UpperCAmelCase__ )
| 17 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = AutoencoderKL
A__ : Optional[int] = "sample"
A__ : Tuple = 1E-2
@property
def A__ ( self: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Any = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
return {"sample": image}
@property
def A__ ( self: List[str] ) -> Tuple:
return (3, 32, 32)
@property
def A__ ( self: Optional[Any] ) -> Any:
return (3, 32, 32)
def A__ ( self: Any ) -> Tuple:
UpperCAmelCase_ : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
UpperCAmelCase_ : int = self.dummy_input
return init_dict, inputs_dict
def A__ ( self: Optional[Any] ) -> int:
pass
def A__ ( self: str ) -> Any:
pass
@unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" )
def A__ ( self: Union[str, Any] ) -> Dict:
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
UpperCAmelCase_ : Dict = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
UpperCAmelCase_ : Dict = dict(model.named_parameters() )
UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
def A__ ( self: Optional[Any] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 )
model.to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self: Optional[int] ) -> int:
UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
else:
UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : str = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
UpperCAmelCase_ : int = image.to(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample
UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
return F'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase_ ) for s in shape] )}.npy'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
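    # NOTE: torch.Generator(device="mps") is unsupported on older PyTorch builds,
    # which is why get_generator above falls back to seeding the global RNG with
    # torch.manual_seed on Apple Silicon instead of a device-local generator.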
@parameterized.expand(
[
# fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_a, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_a, atol=1e-2)
@parameterized.expand(
[
# fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
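# --- A minimal standalone sketch (not part of the test suite) of the decode path
# the slow tests above exercise; it assumes a CUDA device and network access to
# the CompVis/stable-diffusion-v1-4 checkpoint's "vae" subfolder. ---
if __name__ == "__main__":
    vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").to("cuda").eval()
    latents = torch.randn(1, 4, 64, 64, device="cuda")  # 512 // 8 = 64 latent resolution
    with torch.no_grad():
        decoded = vae.decode(latents).sample  # shape (1, 3, 512, 512)
    print(decoded.shape)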
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        super().setUp()
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 sampling is stochastic, so seed before calling the tool
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
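# --- A minimal usage sketch of the same tool outside the test harness; load_tool
# downloads the default SpeechT5 checkpoint, so network access is assumed. ---
if __name__ == "__main__":
    from transformers import load_tool

    tool = load_tool("text-to-speech")
    tool.setup()
    torch.manual_seed(0)  # seed for repeatability; the model samples stochastically
    waveform = tool("hey").to_raw()  # 1-D audio tensor
    print(waveform.shape)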
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so write the aligned schema through __dict__
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
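# --- A minimal sketch of aligning the template with a dataset's features; the
# 16000 Hz sampling rate is an arbitrary example value. ---
if __name__ == "__main__":
    features = Features({"audio": Audio(sampling_rate=16000), "transcription": Value("string")})
    template = AutomaticSpeechRecognition()
    aligned = template.align_with_features(features)
    assert aligned.input_schema["audio"].sampling_rate == 16000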
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # sort items by the given key (e.g. value/weight ratio), best first
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
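# --- Example usage of the helpers above: pick items greedily by value/weight
# ratio under a weight budget of 100 (sample menu values are illustrative). ---
if __name__ == "__main__":
    menu = build_menu(["Burger", "Pizza", "Cola", "Rice"], [80, 100, 60, 70], [40, 60, 40, 70])
    chosen, total_value = greedy(menu, 100, Things.value_weight)
    print(chosen, total_value)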
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"
    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128,
        shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64,
        max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224,
        num_channels=3, patch_size=16, classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id,
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
    def atol_for_validation(self) -> float:
        return 1e-5
@property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
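# --- A minimal sketch of driving the dummy-input generation above; the processor
# checkpoint name is an assumption, and OCR is disabled because dummy words and
# boxes are supplied explicitly. ---
if __name__ == "__main__":
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base")
    setattr(processor.image_processor, "apply_ocr", False)
    onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
    dummy_inputs = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=8, framework=None)
    print(sorted(dummy_inputs.keys()))  # expect: attention_mask, bbox, input_ids, pixel_values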
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encode_decode(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
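# --- A quick round-trip sketch with the fast Bloom tokenizer, reusing the sample
# strings from the test above; downloads the bigscience/tokenizer files. ---
if __name__ == "__main__":
    tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
    sentences = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
    ids = tok.batch_encode_plus(sentences)["input_ids"]
    assert tok.batch_decode(ids) == sentences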
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
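# Example invocation (the script filename is illustrative):
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-converted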
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * ' '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(".")
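# Example: running this script from a repository root prints a nested Markdown
# index, e.g. a "## Sorts" heading followed by "* [Bubble Sort](sorts/bubble_sort.py)"
# bullets (paths illustrative).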
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
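# --- A small illustration (not part of the test suite) of the shape arithmetic
# used in create_and_check_model above, evaluated with this tester's defaults
# (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]). ---
if __name__ == "__main__":
    image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
    expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))  # 256 // 16 = 16
    expected_dim = int(embed_dim * 2 ** (len(depths) - 1))  # 16 * 4 = 64
    print(expected_seq_len, expected_dim)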
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
    def test_feed_forward_chunking(self):
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# FocalNet has a different seq_length
UpperCAmelCase_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : List[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
@slow
def A__ ( self: Optional[int] ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise the PyTorch model configuration.
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
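# Example invocation (paths illustrative; the checkpoint basename must be
# "model.ckpt" so the vocab file resolves via tf_checkpoint_path[:-10]):
#   python convert_tapas_checkpoint.py \
#       --task WTQ --reset_position_index_per_cell \
#       --tf_checkpoint_path tapas_wtq/model.ckpt \
#       --tapas_config_file tapas_wtq/bert_config.json \
#       --pytorch_dump_path ./tapas-wtq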
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str:
UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
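# NOTE: in the attention tests below each window attends over window_size**2
# positions, so with window_size=2 every attention map is 4x4 per head; the
# leading dimension of the attention tensors folds batch and window count together.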
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
    pipeline_model_mapping = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: Any ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared]
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared]
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim])
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
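
# A minimal standalone sketch (not part of the test suite) of the padding
# arithmetic exercised by `test_hidden_states_output_with_padding` above. The
# sizes are illustrative assumptions, not fixture values; the formula mirrors
# the one in the test.
def _padded_size_sketch(image_size=(30, 30), patch_size=(3, 3)):
    padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
    padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
    num_patches = (padded_width // patch_size[1]) * (padded_height // patch_size[0])
    return padded_height, padded_width, num_patches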
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Tests for the `accelerate launch` CLI. Any cached `default_config.yaml` is
    temporarily moved aside for the duration of the tests.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")
    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """
    Tests that `accelerate tpu-config` builds the right gcloud command.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/0_12_0.yaml", "--command", self.command,
               "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command,
               "--command", 'echo "Hello World"', "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/0_12_0.yaml", "--command_file", self.command_file,
               "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate",
               "--accelerate_version", "12.0.0", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase_ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase_ = {
'''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to printable unicode strings, as
    used by GPT-2-style byte-level BPE.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
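
def _bpe_helpers_demo():
    """A small usage sketch (never called on import) showing what the helpers return."""
    byte_encoder = bytes_to_unicode()
    # Every possible byte value maps to exactly one printable unicode character.
    assert len(byte_encoder) == 256 and byte_encoder[ord("A")] == "A"
    # `get_pairs` yields the adjacent symbol pairs that BPE ranks are looked up with.
    assert get_pairs(tuple("led")) == {("l", "e"), ("e", "d")}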
class LEDTokenizer(PreTrainedTokenizer):
    """Constructs a LED tokenizer, derived from the GPT-2 byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
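
def _global_attention_padding_demo():
    """
    Usage sketch for the `_pad` override above (never executed on import). The
    vocab/merges file names are assumptions; any GPT-2-style pair would do.
    """
    tokenizer = LEDTokenizer("vocab.json", "merges.txt")
    enc = tokenizer("Summarize this document.")
    # Mark the first token for global attention; when batching, `_pad` extends
    # this list with -1 (not 0, since 0 means "local attention").
    enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
    return tokenizer.pad([enc], padding="longest")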
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path, e.g. "encoder.layers.0.attention.k_proj".
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq checkpoint's weights into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
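
# Example invocation (illustrative paths and script name; adjust to your layout):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-converted \
#       --not_finetuned
#
# Passing --not_finetuned selects UniSpeechSatForPreTraining; omitting it
# converts a fine-tuned checkpoint to UniSpeechSatForCTC.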
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.dummy_cond_unet
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: str ) -> Dict:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Tuple = self.dummy_cond_unet
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCAmelCase_ : Optional[Any] = unet.half()
UpperCAmelCase_ : Optional[int] = vae.half()
UpperCAmelCase_ : int = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCAmelCase_ : Optional[int] = 4003660346
UpperCAmelCase_ : int = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCAmelCase_ : List[Any] = 2734971755
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCAmelCase_ : Optional[Any] = 1044355234
UpperCAmelCase_ : List[str] = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
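
# A hedged usage sketch of the safe-guidance knobs the nightly tests above
# exercise. The checkpoint id matches the one used in the tests; the prompt and
# parameter values are illustrative choices, not recommended settings.
def _safe_sd_demo():
    pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    generator = torch.manual_seed(0)
    # sld_guidance_scale=0 disables safe latent diffusion entirely; larger values
    # (the tests use 2000) steer generation away from unsafe concepts.
    return pipe(
        "a photograph of an astronaut riding a horse",
        generator=generator,
        num_inference_steps=50,
        sld_guidance_scale=2000,
        sld_warmup_steps=7,
        sld_threshold=0.025,
        sld_momentum_scale=0.5,
        sld_mom_beta=0.7,
    ).images[0]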
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
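
# Example invocation (illustrative; assumes a TensorFlow SavedModel was exported
# with a saved_model.pb inside, and that this script lives under utils/):
#
#   python utils/<this_script>.py --saved_model_path ./saved_model/saved_model.pb --opset 12 --strict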
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int:
UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int:
UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Build a long tensor on the current test device."""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
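
# --- Illustrative sketch (not part of the original test suite) ---
# The integration test above compares expected/actual *ratios* instead of
# absolute differences because MobileBERT activations span roughly 1e0 to 1e8,
# so no single absolute tolerance can cover all entries. A framework-free
# version of the same idea, using made-up numbers:
def _ratio_within_tolerance(expected, actual, tolerance=1e-3):
    # Passes iff every expected/actual ratio lies in [1 - tolerance, 1 + tolerance].
    return all(1 - tolerance <= e / a <= 1 + tolerance for e, a in zip(expected, actual))


assert _ratio_within_tolerance([1e8, 2.0], [1.00005e8, 2.0000001])
assert not _ratio_within_tolerance([1e8, 2.0], [1.1e8, 2.0])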
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
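
# --- Illustrative sketch (not part of the original test suite) ---
# prepare_config_and_inputs above repairs randomly drawn bounding boxes so
# that x0 <= x1 and y0 <= y1 (LayoutLMv3 expects (x0, y0, x1, y1) boxes). The
# same repair can be done vectorised with NumPy; the helper name is ours:
def _make_bbox_legal(bbox: np.ndarray) -> np.ndarray:
    # bbox has shape (..., 4); sort each (min, max) coordinate pair.
    fixed = bbox.copy()
    fixed[..., [0, 2]] = np.sort(bbox[..., [0, 2]], axis=-1)
    fixed[..., [1, 3]] = np.sort(bbox[..., [1, 3]], axis=-1)
    return fixed


assert (_make_bbox_legal(np.array([[5, 9, 2, 1]])) == np.array([[2, 1, 5, 9]])).all()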
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
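
# --- Illustrative sketch (not part of the original test suite) ---
# The npz round-trip tested above works because BarkProcessor accepts a voice
# preset either as an in-memory dict of arrays or as a path to an .npz file
# holding the same keys. A stand-alone demonstration of that serialisation
# step (key names mirror the dict used in the test):
def _voice_preset_roundtrip_demo():
    with tempfile.TemporaryDirectory() as tmp:
        preset = {
            "semantic_prompt": np.ones(35),
            "coarse_prompt": np.ones((2, 35)),
            "fine_prompt": np.ones((8, 35)),
        }
        path = os.path.join(tmp, "voice_preset.npz")
        np.savez(path, **preset)
        loaded = np.load(path)
        assert all(np.array_equal(preset[k], loaded[k]) for k in preset)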
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]
    if parents is None or len(parents) == 0:
        parents = ["N/A"]
    pdb_headers.append(f"PARENT {' '.join(parents)}")
    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)
            max_idx = max(int(chain_idx) for chain_idx in parent_dict)
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))
    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))
    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types
    pdb_lines: List[str] = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")
    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue
            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""
            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
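
# --- Illustrative sketch (not part of the original module) ---
# to_pdb above relies on PDB being a fixed-column format: every field of an
# ATOM record occupies exact character positions, which is why the f-strings
# use explicit widths. One record built from invented values shows the layout:
_demo_atom_line = (
    f"{'ATOM':<6}{1:>5} {' CA ':<4}{'':>1}"
    f"{'ALA':>3} {'A':>1}"
    f"{1:>4}{'':>1}   "
    f"{11.104:>8.3f}{6.134:>8.3f}{-6.504:>8.3f}"
    f"{1.00:>6.2f}{0.00:>6.2f}          "
    f"{'C':>2}{'':>2}"
)
assert len(_demo_atom_line) == 80  # a full-width PDB record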
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an Empty exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
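
# --- Illustrative sketch (not part of the original test suite) ---
# The pattern exercised above - run generate() on a worker thread and consume
# decoded text from TextIteratorStreamer on the main thread - is how streaming
# is typically wired up outside of tests as well. The helper below is never
# called here; the model name is just a plausible placeholder:
def _streaming_usage_demo():
    tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
    model = AutoModelForCausalLM.from_pretrained("distilgpt2")
    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
    thread.start()
    for chunk in streamer:  # yields decoded text as soon as tokens arrive
        print(chunk, end="")
    thread.join()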
"""Equivalent resistance of resistors in parallel and in series."""
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    # Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    # Req = R1 + R2 + ... + Rn
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
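
# --- Illustrative usage (not part of the original module) ---
# Two 4-ohm resistors: 1 / (1/4 + 1/4) = 2 ohms in parallel, 4 + 4 = 8 in series.
assert resistor_parallel([4.0, 4.0]) == 2.0
assert resistor_series([4.0, 4.0]) == 8.0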
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
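
# --- Illustrative note (not part of the original test suite) ---
# `image[0, -3:, -3:, -1]` in the assertions above picks the 3x3 bottom-right
# corner of the last channel of the first image - a cheap fingerprint of a
# (1, H, W, 3) output that is stable enough to compare to reference values:
_demo_img = np.arange(1 * 4 * 4 * 3, dtype=np.float32).reshape(1, 4, 4, 3)
assert _demo_img[0, -3:, -3:, -1].shape == (3, 3)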
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
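
# --- Illustrative sketch (not part of the original module) ---
# The _LazyModule registered above keeps `import` of the package cheap: heavy
# submodules such as modeling_timesformer are only imported when one of their
# attributes is first accessed. The same deferred-import idea with only the
# standard library:
def _lazy_attr(module_name, attr):
    import importlib

    def resolve():
        # The target module is imported here, on first use, not at definition time.
        return getattr(importlib.import_module(module_name), attr)

    return resolve


_get_pi = _lazy_attr("math", "pi")  # `math` is not imported yet
assert _get_pi() == 3.141592653589793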
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of `graph`, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
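
# --- Illustrative note (not part of the original module) ---
# Kahn's algorithm runs in O(V + E): each vertex enters the queue once and
# each edge decrements an in-degree once. On a cyclic graph, some in-degree
# never reaches zero, so fewer than len(graph) vertices are popped:
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"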
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowercase__( unittest.TestCase ):
"""simple docstring"""
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image
    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
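
# --- Illustrative sketch (not part of the original test suite) ---
# atol=1e-2 above is deliberately loose: the reference slices come from torch
# in float16 while the Flax model runs in bfloat16, and the two formats round
# differently. An uncalled helper illustrating why exact equality would fail:
def _bfloat16_rounding_demo():
    import jax.numpy as jnp

    reference = jnp.asarray([0.1514, -0.0216], dtype=jnp.float32)
    roundtrip = reference.astype(jnp.bfloat16).astype(jnp.float32)
    # The round-trip through bfloat16 drops mantissa bits, but stays well
    # within the 1e-2 tolerance used by the tests above.
    assert jnp.allclose(roundtrip, reference, atol=1e-2)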
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
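
# --- Illustrative note (not part of the original module) ---
# hidden_size is derived rather than passed: with the defaults embed_dim=96
# and depths=[2, 2, 6, 2] (four stages, each stage doubling the channels),
# the channel dimension after the last stage is 96 * 2 ** (4 - 1) = 768.
assert int(96 * 2 ** (len([2, 2, 6, 2]) - 1)) == 768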
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self : Union[str, Any] ):
return self.config.num_train_timesteps
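
# --- Illustrative note (not part of the original module) ---
# The branches in step() are linear multistep rules of increasing order (the
# fourth-order one uses coefficients (55, -59, 37, -9) / 24). A quick sanity
# check: each rule's coefficients sum to 1, so a constant signal is preserved:
for _coeffs in ([1.0], [3 / 2, -1 / 2], [23 / 12, -16 / 12, 5 / 12], [55 / 24, -59 / 24, 37 / 24, -9 / 24]):
    assert abs(sum(_coeffs) - 1.0) < 1e-12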
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> str:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : List[str] = mock.Mock()
UpperCAmelCase_ : List[Any] = 500
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self: str ) -> int:
# A mock response for an HTTP head request to emulate the server being down
UpperCAmelCase_ : str = mock.Mock()
UpperCAmelCase_ : Optional[int] = 500
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : List[Any] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def A__ ( self: str ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
try:
UpperCAmelCase_ : Any = tempfile.mktemp()
with open(lowerCamelCase_ ,"""wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ )
finally:
os.remove(lowerCamelCase_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" ,"""wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def A__ ( self: List[str] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls: Dict ) -> Optional[int]:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def A__ ( cls: Optional[Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def A__ ( self: Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def A__ ( self: Optional[int] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def A__ ( self: Optional[int] ) -> Optional[Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new tokenizer is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new tokenizer is from the CustomTokenizerFast class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new tokenizer is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Any = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def A__ ( self: Tuple ) -> Optional[int]:
UpperCAmelCase_ : str = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> List[str]:
UpperCAmelCase_ : int = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )
def A__ ( self: List[Any] ) -> Any:
# Even if the offsets are wrong, we are guaranteed to output the correct string parts.
UpperCAmelCase_ : Tuple = Trie()
UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
| 345 | 0 |
from collections import defaultdict
def SCREAMING_SNAKE_CASE_ ( __A : int ) -> int:
"""simple docstring"""
a_ : str = 1
a_ : Optional[Any] = True
for v in tree[start]:
if v not in visited:
ret += dfs(__A )
if ret % 2 == 0:
cuts.append(__A )
return ret
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = 10, 9
UpperCAmelCase_ : List[Any] = defaultdict(list)
UpperCAmelCase_ : dict[int, bool] = {}
UpperCAmelCase_ : list[int] = []
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : List[Any] = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
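# Editor's note: a readable, hedged re-derivation of the "Even Tree" count
# above (edges whose removal leaves an even-sized component); names are
# illustrative, not taken from the snippet.
from collections import defaultdict


def count_even_forest_cuts(edges):
    tree = defaultdict(list)
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    visited, cuts = set(), 0

    def dfs(node):
        nonlocal cuts
        visited.add(node)
        size = 1
        for nxt in tree[node]:
            if nxt not in visited:
                sub = dfs(nxt)
                if sub % 2 == 0:  # an even-sized subtree can be cut off
                    cuts += 1
                size += sub
        return size

    dfs(1)
    return cuts


sample = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
print(count_even_forest_cuts(sample))  # 2, matching the snippet's intended output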
| 32 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
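# Editor's note: the classes above all repeat one "dummy backend object"
# pattern. A self-contained sketch of the idea (not the exact transformers
# implementation); `requires_backends` here is a simplified stand-in.
def requires_backends(obj, backends):
    name = getattr(obj, "__name__", obj.__class__.__name__)
    raise ImportError(f"{name} requires the {backends} backend(s), which are not installed.")


class DummyObject(type):
    """Metaclass: any public attribute access on the class raises the error."""

    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


class FlaxModelPlaceholder(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)


try:
    FlaxModelPlaceholder.from_pretrained("some/checkpoint")  # hypothetical call
except ImportError as err:
    print(err)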
| 345 | 0 |
"""simple docstring"""
def lowercase ( __snake_case : list ):
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
lowercase_ : int = False
for j in range(i , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowercase_ , lowercase_ : Dict = unsorted[j - 1], unsorted[j]
lowercase_ : List[str] = True
for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
lowercase_ , lowercase_ : int = unsorted[j + 1], unsorted[j]
lowercase_ : Dict = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : List[str] = input('''Enter numbers separated by a comma:\n''').strip()
__A : str = [int(item) for item in user_input.split(''',''')]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
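# Editor's note: an equivalent hedged restatement of cocktail shaker sort
# with explicit shrinking bounds on both ends, which is the usual way the
# algorithm is presented.
def cocktail_shaker(a):
    lo, hi = 0, len(a) - 1
    while lo < hi:
        swapped = False
        for j in range(lo, hi):  # forward pass bubbles the max to the right
            if a[j] > a[j + 1]:
                a[j], a[j + 1] = a[j + 1], a[j]
                swapped = True
        hi -= 1
        for j in range(hi, lo, -1):  # backward pass bubbles the min to the left
            if a[j] < a[j - 1]:
                a[j], a[j - 1] = a[j - 1], a[j]
                swapped = True
        lo += 1
        if not swapped:
            break
    return a


print(cocktail_shaker([4, 5, 2, 1, 2]))  # [1, 2, 2, 4, 5]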
| 33 |
import random
from typing import Any
def lowerCamelCase_ ( _a : list ):
'''simple docstring'''
for _ in range(len(_a ) ):
UpperCAmelCase_ : Tuple = random.randint(0 , len(_a ) - 1 )
UpperCAmelCase_ : List[Any] = random.randint(0 , len(_a ) - 1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = data[b], data[a]
return data
if __name__ == "__main__":
UpperCamelCase_ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase_ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
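# Editor's note: the function above swaps two random positions per step; the
# textbook (Durstenfeld) Fisher-Yates walks from the end and swaps each slot
# with a uniformly chosen earlier one. A hedged sketch of that variant:
import random


def fisher_yates(data):
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # inclusive, so staying in place is allowed
        data[i], data[j] = data[j], data[i]
    return data


print(fisher_yates(list(range(8))))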
| 345 | 0 |
'''simple docstring'''
def snake_case_ (_a : int ):
if not isinstance(_a , _a ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
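# Editor's note: the divisor scan above is O(n). A sketch of the same
# proper-divisor sum in O(sqrt(n)), pairing each divisor d with n // d:
def sum_proper_divisors(n):
    if not isinstance(n, int):
        raise ValueError("Input must be an integer")
    if n <= 0:
        raise ValueError("Input must be positive")
    if n == 1:
        return 0
    total, d = 1, 2  # 1 divides every n > 1
    while d * d <= n:
        if n % d == 0:
            total += d
            if d != n // d:  # avoid double-counting a perfect-square root
                total += n // d
        d += 1
    return total


print(sum_proper_divisors(28))  # 28 is perfect: 1 + 2 + 4 + 7 + 14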
| 34 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
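# Editor's note: the up blocks above pop the most recent down-block
# activation and concatenate it channel-wise (NHWC layout, so axis=-1). A
# minimal standalone sketch of that bookkeeping, assuming JAX is installed;
# the shapes are illustrative.
import jax.numpy as jnp

hidden_states = jnp.zeros((1, 8, 8, 64))
res_hidden_states_tuple = (jnp.zeros((1, 8, 8, 32)), jnp.zeros((1, 8, 8, 64)))

res_hidden_states = res_hidden_states_tuple[-1]  # most recent skip connection
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(hidden_states.shape)  # (1, 8, 8, 128)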
| 345 | 0 |
'''simple docstring'''
def __snake_case( _lowerCAmelCase ) -> int:
if n == 1 or not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return 0
elif n == 2:
return 1
else:
snake_case__ : Optional[int] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : Union[str, Any] = 0
snake_case__ : List[str] = 2
while digits < n:
index += 1
snake_case__ : Any = len(str(fibonacci(_lowerCAmelCase ) ) )
return index
def __snake_case( _lowerCAmelCase = 1_000 ) -> int:
return fibonacci_digits_index(_lowerCAmelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
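# Editor's note: the helper above recomputes the whole Fibonacci sequence
# for every probe. A single-pass sketch that keeps only the last two values:
def first_fib_index_with_digits(n):
    if n == 1:
        return 1
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index


print(first_fib_index_with_digits(3))  # 12, since F(12) = 144
print(first_fib_index_with_digits(1000))  # 4782, the Project Euler 25 answer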
| 35 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]:
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : Optional[int] = conva_get[:2]
UpperCAmelCase_ : List[Any] = conva_get[2]
UpperCAmelCase_ : str = size_pa
UpperCAmelCase_ : Optional[int] = rate_w
UpperCAmelCase_ : Dict = rate_t
UpperCAmelCase_ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# modify model parameter
UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : int = model_dic.get("""wkj""" )
UpperCAmelCase_ : int = model_dic.get("""vji""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple:
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any:
# convolution process
UpperCAmelCase_ : Optional[Any] = convs[0]
UpperCAmelCase_ : int = convs[1]
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Dict = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
# calculate the feature map of every kernel and save them as a list of matrices
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
# expanding the data slice to one dimension
UpperCAmelCase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
UpperCAmelCase_ : Any = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[int] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
# expanding three-dimensional data to a one-dimensional list
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Tuple = np.shape(data[i] )
UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
# expanding a matrix to a one-dimensional list
UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = pd_pool[
i_pool
]
UpperCAmelCase_ : List[str] = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]:
# model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = data_bp_input
UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[str] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[Any] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# fully connected layer
UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
# accumulate the summed error over all single images
UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : int = rp + 1
UpperCAmelCase_ : Any = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
print("""------------------Training Complete---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
# model prediction
UpperCAmelCase_ : Union[str, Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : str = data_bp_input
UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
# return the data of image after convoluting process so we can check it out
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
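# Editor's note: a compact numpy sketch of the average-pooling step the class
# above implements with explicit loops; reshape into (blocks, k, blocks, k)
# and average over the kernel axes. Assumes the map size is divisible by k.
import numpy as np


def average_pool(feature_map, k):
    blocks = feature_map.shape[0] // k
    return feature_map.reshape(blocks, k, blocks, k).mean(axis=(1, 3))


fm = np.arange(16, dtype=float).reshape(4, 4)
print(average_pool(fm, 2))
# [[ 2.5  4.5]
#  [10.5 12.5]]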
| 345 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_snake_case = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
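# Editor's note: a hedged sketch of the lazy-import idea behind `_LazyModule`
# (not the exact transformers implementation): submodules are imported only
# on first attribute access, then cached on the module object.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr in self._attr_to_module:
            module = importlib.import_module(self._attr_to_module[attr])
            value = getattr(module, attr)
            setattr(self, attr, value)  # cache so __getattr__ is not hit again
            return value
        raise AttributeError(attr)


lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"a": 1}))  # the json module is imported only at this point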
| 36 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Optional[Any] = CTRLTokenizer
A__ : Optional[Any] = False
A__ : str = False
def A__ ( self: Optional[int] ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: int ,lowerCamelCase_: int ) -> str:
UpperCAmelCase_ : List[str] = """adapt react readapt apt"""
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
return input_text, output_text
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
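# Editor's note: a toy sketch of the BPE merge loop the test above exercises
# (greedily apply the highest-priority adjacent pair until none applies); the
# merges below are illustrative, not the test's exact merge file.
def bpe(word, merges):
    ranks = {pair: i for i, pair in enumerate(merges)}
    symbols = list(word)
    while len(symbols) > 1:
        ranked = [
            (ranks.get(pair, float("inf")), i)
            for i, pair in enumerate(zip(symbols, symbols[1:]))
        ]
        rank, i = min(ranked)
        if rank == float("inf"):  # no applicable merge left
            break
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols


print(bpe("adapt", [("a", "p"), ("ap", "t"), ("a", "d"), ("ad", "apt")]))  # ['adapt']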
| 345 | 0 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
if any(not isinstance(UpperCamelCase , UpperCamelCase ) or x < 0 for x in sequence ):
raise TypeError("""Sequence must be a list of non-negative integers""" )
for _ in range(len(UpperCamelCase ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(UpperCamelCase , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
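# Editor's note: an alternative hedged sketch of bead sort that works with
# the "beads on rods" picture directly: column j holds one bead per value
# greater than j, and reading row heights back bottom-up, then reversing,
# yields the ascending order.
def bead_sort_via_columns(sequence):
    n = len(sequence)
    cols = [sum(1 for x in sequence if x > j) for j in range(max(sequence, default=0))]
    # after gravity, row i from the bottom holds one bead for every column
    # that received more than i beads
    return [sum(1 for c in cols if c > i) for i in range(n)][::-1]


print(bead_sort_via_columns([5, 4, 3, 2, 1]))  # [1, 2, 3, 4, 5]
print(bead_sort_via_columns([7, 9, 4, 3, 5]))  # [3, 4, 5, 7, 9]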
| 37 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = "ernie_m"
A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout
UpperCAmelCase_ : str = is_decoder
UpperCAmelCase_ : List[str] = act_dropout
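# Editor's note: the config above maps "dropout" -> "classifier_dropout" via
# `attribute_map`. A minimal sketch of that aliasing mechanism (not the exact
# PretrainedConfig implementation):
class ConfigWithAliases:
    attribute_map = {"dropout": "classifier_dropout"}

    def __init__(self, classifier_dropout=0.1):
        self.classifier_dropout = classifier_dropout

    def __setattr__(self, key, value):
        super().__setattr__(self.attribute_map.get(key, key), value)

    def __getattr__(self, key):  # only reached when normal lookup fails
        if key in self.attribute_map:
            return getattr(self, self.attribute_map[key])
        raise AttributeError(key)


cfg = ConfigWithAliases()
cfg.dropout = 0.2
print(cfg.classifier_dropout)  # 0.2, written through the alias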
| 345 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCAmelCase_ : int = '''bart'''
UpperCAmelCase_ : int = True
@st.cache(allow_output_mutation=__magic_name__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
UpperCamelCase :Optional[Any] = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
UpperCamelCase :Union[str, Any] = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
UpperCamelCase :Optional[int] = qar_model.eval()
else:
UpperCamelCase , UpperCamelCase :Optional[int] = (None, None)
if MODEL_TYPE == "bart":
UpperCamelCase :Any = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
UpperCamelCase :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
UpperCamelCase :Optional[int] = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
UpperCamelCase :Optional[Any] = sas_model.eval()
else:
UpperCamelCase , UpperCamelCase :Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__magic_name__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
UpperCamelCase :Tuple = faiss.StandardGpuResources()
UpperCamelCase :Dict = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
UpperCamelCase :str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
UpperCamelCase :Optional[Any] = faiss.IndexFlatIP(128 )
UpperCamelCase :Optional[int] = faiss.index_cpu_to_gpu(__magic_name__ , 1 , __magic_name__ )
wikiaab_gpu_index_flat.add(__magic_name__ ) # TODO fix for larger GPU
else:
UpperCamelCase , UpperCamelCase :Optional[Any] = (None, None)
UpperCamelCase :List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__magic_name__ )
def SCREAMING_SNAKE_CASE_ ( ) -> str:
"""simple docstring"""
UpperCamelCase :Dict = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
UpperCamelCase :List[Any] = elia["""train_eli5"""]
UpperCamelCase :Optional[Any] = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
UpperCamelCase :Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(__magic_name__ )
return (elia_train, eli5_train_q_index)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = load_indexes()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = load_models()
UpperCAmelCase_ , UpperCAmelCase_ : str = load_train_data()
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : Any=10 ) -> Any:
"""simple docstring"""
UpperCamelCase :List[str] = embed_questions_for_retrieval([question] , __magic_name__ , __magic_name__ )
UpperCamelCase , UpperCamelCase :int = eli5_train_q_index.search(__magic_name__ , __magic_name__ )
UpperCamelCase :Any = [elia_train[int(__magic_name__ )] for i in I[0]]
return nn_examples
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Union[str, Any] , __magic_name__ : List[str]="wiki40b" , __magic_name__ : str="dense" , __magic_name__ : Tuple=10 ) -> List[str]:
"""simple docstring"""
if source == "none":
UpperCamelCase , UpperCamelCase :Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
UpperCamelCase , UpperCamelCase :List[Any] = query_qa_dense_index(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
else:
UpperCamelCase , UpperCamelCase :List[Any] = query_es_index(
__magic_name__ , __magic_name__ , index_name="""english_wiki40b_snippets_100w""" , n_results=__magic_name__ , )
UpperCamelCase :str = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
UpperCamelCase :Tuple = """question: {} context: {}""".format(__magic_name__ , __magic_name__ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __magic_name__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __magic_name__ : None),
} )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=64 , __magic_name__ : int=256 , __magic_name__ : Dict=False , __magic_name__ : str=2 , __magic_name__ : str=0.95 , __magic_name__ : Dict=0.8 ) -> Dict:
"""simple docstring"""
with torch.no_grad():
UpperCamelCase :Optional[Any] = qa_sas_generate(
__magic_name__ , __magic_name__ , __magic_name__ , num_answers=1 , num_beams=__magic_name__ , min_len=__magic_name__ , max_len=__magic_name__ , do_sample=__magic_name__ , temp=__magic_name__ , top_p=__magic_name__ , top_k=__magic_name__ , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
UpperCAmelCase_ : List[Any] = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
UpperCAmelCase_ : Union[str, Any] = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase_ : List[str] = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase_ : Dict = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
UpperCAmelCase_ : Tuple = st.sidebar.checkbox('''Demo options''')
if demo_options:
UpperCAmelCase_ : str = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
UpperCAmelCase_ : str = action_list.index(action_st)
UpperCAmelCase_ : int = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
UpperCAmelCase_ : str = show_type == '''Show full text of passages'''
else:
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Optional[Any] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
UpperCAmelCase_ : Any = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
UpperCAmelCase_ : int = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
UpperCAmelCase_ : List[Any] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
UpperCAmelCase_ : Optional[Any] = '''wiki40b'''
UpperCAmelCase_ : Any = '''dense'''
UpperCAmelCase_ : int = '''beam'''
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Optional[Any] = 64
UpperCAmelCase_ : str = 2_56
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : str = st.sidebar.checkbox('''Generation options''')
if generate_options:
UpperCAmelCase_ : Optional[Any] = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
UpperCAmelCase_ : Optional[Any] = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
UpperCAmelCase_ : int = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
UpperCAmelCase_ : Optional[Any] = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase_ : Optional[Any] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase_ : int = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase_ : Tuple = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase_ : Optional[int] = None
# start main text
UpperCAmelCase_ : Optional[int] = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
UpperCAmelCase_ : List[Any] = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase_ : Union[str, Any] = st.text_input('''Enter your question here:''', '''''')
else:
UpperCAmelCase_ : Optional[Any] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase_ , UpperCAmelCase_ : Dict = make_support(question, source=wiki_source, method='''dense''', n_results=10)
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
UpperCAmelCase_ : Tuple = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase_ : Any = support_list[:10]
UpperCAmelCase_ : Optional[Any] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase_ , UpperCAmelCase_ : str = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
UpperCAmelCase_ : Any = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
UpperCAmelCase_ : List[Any] = res[1].strip()
if sec_titles == "":
UpperCAmelCase_ : Union[str, Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
UpperCAmelCase_ : str = sec_titles.split(''' & ''')
UpperCAmelCase_ : str = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase_ : Any = find_nearest_training(question)
UpperCAmelCase_ : Optional[int] = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
UpperCAmelCase_ : Dict = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
UpperCAmelCase_ : int = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
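

# For illustration, the dense/sparse merging logic above can be factored into a small
# standalone helper. This is a hypothetical sketch, not part of the original app; it
# assumes each retrieval result is a (article, sections, passage) tuple as used above.
def _merge_support_lists(dense_results, sparse_results, n_results=10):
    merged = []
    # interleave dense and sparse hits, de-duplicating on the full tuple
    for res_d, res_s in zip(dense_results, sparse_results):
        if tuple(res_d) not in merged:
            merged.append(tuple(res_d))
        if tuple(res_s) not in merged:
            merged.append(tuple(res_s))
    return merged[:n_results]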
| 38 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowerCamelCase_ ( _a : str , _a : Any=100 , _a : int=" " ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = text.split(_a )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_a ) , _a )]
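# For example, split_text("one two three four", 2) -> ["one two", "three four"];
# with the default n=100, a 250-word document yields passages of 100, 100 and 50 words.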
def lowerCamelCase_ ( _a : dict ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(_a ):
titles.append(title if title is not None else """""" )
texts.append(_a )
return {"title": titles, "text": texts}
def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
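# The returned matrix has one row per passage; its width matches the encoder's hidden
# size (768 for the DPR context encoders used here, which is also the default `d`
# passed to the HNSW index below).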
def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ):
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCAmelCase_ : Optional[int] = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a )
UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCAmelCase_ : Any = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
UpperCAmelCase_ : List[str] = dataset.map(
partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , )
# And finally save your dataset
UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(_a )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=_a )
# And save the index
UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(_a )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
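# A minimal sketch of reloading the saved dataset and index and querying them
# (hypothetical helper; `question_embedding` is assumed to be a float32 numpy array
# produced by the matching DPR *question* encoder):
def _example_query_index(passages_path, index_path, question_embedding, k=5):
    from datasets import load_from_disk

    dataset = load_from_disk(passages_path)
    dataset.load_faiss_index("embeddings", index_path)
    scores, examples = dataset.get_nearest_examples("embeddings", question_embedding, k=k)
    return scores, examples["text"]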
@dataclass
class _snake_case :
'''simple docstring'''
A__ : str = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
A__ : Optional[str] = field(
default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
A__ : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
A__ : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
A__ : Optional[str] = field(
default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : Optional[int] = field(
default=__snake_case , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
A__ : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class _snake_case :
'''simple docstring'''
A__ : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
A__ : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 345 | 0 |
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase = "" , UpperCAmelCase = False ):
"""simple docstring"""
_UpperCAmelCase = {}
# A node will be a leaf if the tree contains its word
_UpperCAmelCase = is_leaf
_UpperCAmelCase = prefix
    def UpperCamelCase ( self , UpperCAmelCase ):
        """Compute the common substring of this node's prefix and the given word,
        e.g. RadixNode("myprefix").match("mystring") -> ("my", "prefix", "string")."""
_UpperCAmelCase = 0
for q, w in zip(self.prefix , UpperCAmelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
for word in words:
self.insert(UpperCAmelCase )
    def UpperCamelCase ( self , UpperCAmelCase ):
        """Insert a word into the tree."""
        # Case 1: The word equals this node's prefix
        # Solution: We mark the current node as a leaf
        if self.prefix == word:
            _UpperCAmelCase = True
        # Case 2: The node has no outgoing edge starting with the word's first character
        # Solution: We create an edge from the current node to a new one
        # containing the word
elif word[0] not in self.nodes:
_UpperCAmelCase = RadixNode(prefix=UpperCAmelCase , is_leaf=UpperCAmelCase )
else:
_UpperCAmelCase = self.nodes[word[0]]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
            # Case 3: The node's prefix equals the matching string
            # Solution: We insert the remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase )
            # Case 4: The node's prefix extends past the matching string
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
_UpperCAmelCase = remaining_prefix
_UpperCAmelCase = self.nodes[matching_string[0]]
_UpperCAmelCase = RadixNode(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = aux_node
if remaining_word == "":
_UpperCAmelCase = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.nodes.get(word[0] , UpperCAmelCase )
if not incoming_node:
return False
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.nodes.get(word[0] , UpperCAmelCase )
if not incoming_node:
return False
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
_UpperCAmelCase = list(self.nodes.values() )[0]
_UpperCAmelCase = merging_node.is_leaf
self.prefix += merging_node.prefix
_UpperCAmelCase = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
_UpperCAmelCase = False
# If there is 1 edge, we merge it with its child
else:
_UpperCAmelCase = list(incoming_node.nodes.values() )[0]
_UpperCAmelCase = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
_UpperCAmelCase = merging_node.nodes
return True
def UpperCamelCase ( self , UpperCAmelCase = 0 ):
"""simple docstring"""
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def __A ( )-> bool:
"""simple docstring"""
_UpperCAmelCase = 'banana bananas bandana band apple all beast'.split()
_UpperCAmelCase = RadixNode()
root.insert_many(__lowerCAmelCase )
assert all(root.find(__lowerCAmelCase ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def __A ( )-> None:
"""simple docstring"""
assert test_trie()
def __A ( )-> None:
"""simple docstring"""
_UpperCAmelCase = RadixNode()
_UpperCAmelCase = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(__lowerCAmelCase )
print('Words:' , __lowerCAmelCase )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
| 39 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = AutoencoderKL
A__ : Optional[int] = "sample"
A__ : Tuple = 1E-2
@property
def A__ ( self: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Any = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
return {"sample": image}
@property
def A__ ( self: List[str] ) -> Tuple:
return (3, 32, 32)
@property
def A__ ( self: Optional[Any] ) -> Any:
return (3, 32, 32)
def A__ ( self: Any ) -> Tuple:
UpperCAmelCase_ : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
UpperCAmelCase_ : int = self.dummy_input
return init_dict, inputs_dict
def A__ ( self: Optional[Any] ) -> int:
pass
def A__ ( self: str ) -> Any:
pass
@unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" )
def A__ ( self: Union[str, Any] ) -> Dict:
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample
        # run the backward pass on the model. For simplicity we don't compute a real
        # loss here; we just backprop on the mean of (out - labels)
model.zero_grad()
UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample
        # run the backward pass on the gradient-checkpointed model, backpropagating on
        # the mean of (out_a - labels) just as above
model_a.zero_grad()
UpperCAmelCase_ : Dict = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
UpperCAmelCase_ : Dict = dict(model.named_parameters() )
UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
def A__ ( self: Optional[Any] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 )
model.to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self: Optional[int] ) -> int:
UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
else:
UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : str = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
UpperCAmelCase_ : int = image.to(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample
UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
return F'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase_ ) for s in shape] )}.npy'''
def A__ ( self: Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: List[Any]=(4, 3, 512, 512) ,lowerCamelCase_: Optional[Any]=False ) -> Optional[int]:
        UpperCAmelCase_ : Tuple = torch.float16 if fpaa else torch.float32
UpperCAmelCase_ : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ ,lowerCamelCase_ ) ) ).to(lowerCamelCase_ ).to(lowerCamelCase_ )
return image
def A__ ( self: List[Any] ,lowerCamelCase_: List[str]="CompVis/stable-diffusion-v1-4" ,lowerCamelCase_: Union[str, Any]=False ) -> Any:
UpperCAmelCase_ : Optional[Any] = """fp16""" if fpaa else None
        UpperCAmelCase_ : str = torch.float16 if fpaa else torch.float32
UpperCAmelCase_ : int = AutoencoderKL.from_pretrained(
lowerCamelCase_ ,subfolder="""vae""" ,torch_dtype=lowerCamelCase_ ,revision=lowerCamelCase_ ,)
model.to(lowerCamelCase_ ).eval()
return model
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=0 ) -> Optional[int]:
if torch_device == "mps":
return torch.manual_seed(lowerCamelCase_ )
return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple:
UpperCAmelCase_ : List[Any] = self.get_sd_vae_model()
UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model()
UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int:
UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.get_sd_vae_model()
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist
UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
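

# A minimal sketch of the encode/decode round trip the tests above exercise, reusing
# the same AutoencoderKL API and the dummy checkpoint already referenced in this file
# (illustrative only, not one of the test cases):
def _example_vae_round_trip():
    model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy").eval()
    image = torch.randn(1, model.config.in_channels, 32, 32)
    with torch.no_grad():
        latents = model.encode(image).latent_dist.sample(generator=torch.manual_seed(0))
        reconstruction = model.decode(latents).sample
    # the decoded sample has the same spatial shape as the input image
    return reconstruction.shape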
| 345 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__lowercase = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Dict = """facebook/nllb-200-distilled-600M"""
    UpperCAmelCase : Dict = (
        """This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
        """be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, """
        """which should be the desired output language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
UpperCAmelCase : Any = """translator"""
UpperCAmelCase : Optional[int] = AutoTokenizer
UpperCAmelCase : str = AutoModelForSeqaSeqLM
UpperCAmelCase : List[Any] = LANGUAGE_CODES
UpperCAmelCase : Tuple = ["""text""", """text""", """text"""]
UpperCAmelCase : List[str] = ["""text"""]
def __snake_case ( self : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any):
if src_lang not in self.lang_to_code:
raise ValueError(f'''{src_lang} is not a supported language.''')
if tgt_lang not in self.lang_to_code:
raise ValueError(f'''{tgt_lang} is not a supported language.''')
a : Dict = self.lang_to_code[src_lang]
a : Any = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__UpperCAmelCase , return_tensors="pt" , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase)
def __snake_case ( self : List[Any] , __UpperCAmelCase : int):
return self.model.generate(**__UpperCAmelCase)
def __snake_case ( self : int , __UpperCAmelCase : Union[str, Any]):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__UpperCAmelCase)
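

# A hedged usage sketch. PipelineTool instances are callable, running
# encode -> forward -> decode as defined above; the class name `TranslationTool`
# and the keyword arguments are the upstream names and are assumptions here,
# since this copy renames the class to `_A` and obscures the parameter names:
#
#     translator = TranslationTool()
#     translator("How are you?", src_lang="English", tgt_lang="French")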
| 40 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__snake_case )
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
A__ : ClassVar[Features] = Features({"audio": Audio()} )
A__ : ClassVar[Features] = Features({"transcription": Value("string" )} )
A__ : str = "audio"
A__ : str = "transcription"
def A__ ( self: int ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] ,lowerCamelCase_ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
UpperCAmelCase_ : Any = copy.deepcopy(self )
UpperCAmelCase_ : Union[str, Any] = self.input_schema.copy()
UpperCAmelCase_ : Any = features[self.audio_column]
UpperCAmelCase_ : Union[str, Any] = input_schema
return task_template
@property
def A__ ( self: List[str] ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
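

# A minimal usage sketch (the upstream class name `AutomaticSpeechRecognition` is
# assumed here, since this copy renames it). Given a dataset whose features include
# an Audio column "file" and a string column "text":
#
#     template = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")
#     task = template.align_with_features(dataset.features)
#     task.column_mapping  # {"file": "audio", "text": "transcription"}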
| 345 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> bool:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
lowerCamelCase__ : Any = str(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = """""".join(sorted(UpperCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
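# For example, 147 is an increasing number, 66420 is a decreasing number, and
# 155349 is bouncy: its digits neither always increase nor always decrease.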
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 99 ) -> int:
if not 0 < percent < 100:
        raise ValueError("""solution() only accepts values strictly between 0 and 100""" )
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : Tuple = 1
while True:
if check_bouncy(UpperCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(99)}')
| 41 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "layoutlmv3"
def __init__( self: str ,lowerCamelCase_: Any=50265 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: List[Any]=3072 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Tuple=512 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: int=1 ,lowerCamelCase_: int=0 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Dict=1024 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: Union[str, Any]=128 ,lowerCamelCase_: Tuple=64 ,lowerCamelCase_: Tuple=256 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: Dict=224 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Dict=None ,**lowerCamelCase_: str ,) -> List[Any]:
super().__init__(
vocab_size=lowerCamelCase_ ,hidden_size=lowerCamelCase_ ,num_hidden_layers=lowerCamelCase_ ,num_attention_heads=lowerCamelCase_ ,intermediate_size=lowerCamelCase_ ,hidden_act=lowerCamelCase_ ,hidden_dropout_prob=lowerCamelCase_ ,attention_probs_dropout_prob=lowerCamelCase_ ,max_position_embeddings=lowerCamelCase_ ,type_vocab_size=lowerCamelCase_ ,initializer_range=lowerCamelCase_ ,layer_norm_eps=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,)
UpperCAmelCase_ : List[Any] = max_ad_position_embeddings
UpperCAmelCase_ : Optional[int] = coordinate_size
UpperCAmelCase_ : Optional[int] = shape_size
UpperCAmelCase_ : Optional[Any] = has_relative_attention_bias
UpperCAmelCase_ : Optional[int] = rel_pos_bins
UpperCAmelCase_ : Union[str, Any] = max_rel_pos
UpperCAmelCase_ : Dict = has_spatial_attention_bias
UpperCAmelCase_ : Optional[int] = rel_ad_pos_bins
UpperCAmelCase_ : Tuple = max_rel_ad_pos
UpperCAmelCase_ : Union[str, Any] = text_embed
UpperCAmelCase_ : Optional[Any] = visual_embed
UpperCAmelCase_ : List[str] = input_size
UpperCAmelCase_ : str = num_channels
UpperCAmelCase_ : Optional[int] = patch_size
UpperCAmelCase_ : Tuple = classifier_dropout
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = version.parse("1.12" )
@property
def A__ ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def A__ ( self: Any ) -> float:
return 1e-5
@property
def A__ ( self: int ) -> int:
return 12
def A__ ( self: List[str] ,lowerCamelCase_: "ProcessorMixin" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional["TensorType"] = None ,lowerCamelCase_: int = 3 ,lowerCamelCase_: int = 40 ,lowerCamelCase_: int = 40 ,) -> Mapping[str, Any]:
setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
UpperCAmelCase_ : int = compute_effective_axis_dimension(
lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = dict(
processor(
lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) )
return inputs
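

# A minimal sketch of driving the ONNX config above. The upstream class names
# (`LayoutLMv3Config`, `LayoutLMv3OnnxConfig`, `LayoutLMv3Processor`) are assumptions
# here, since this copy renames the classes defined in this module:
def _example_dummy_onnx_inputs():
    from transformers import LayoutLMv3Config, LayoutLMv3Processor
    from transformers.models.layoutlmv3.configuration_layoutlmv3 import LayoutLMv3OnnxConfig

    config = LayoutLMv3Config()
    onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    # returns input_ids, attention_mask, bbox and pixel_values with the dynamic axes above
    return onnx_config.generate_dummy_inputs(processor)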
| 345 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __UpperCAmelCase :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=32 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=16 , lowerCAmelCase_=[1, 2, 1] , lowerCAmelCase_=[2, 2, 4] , lowerCAmelCase_=2 , lowerCAmelCase_=2.0 , lowerCAmelCase_=True , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_="gelu" , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=10 , lowerCAmelCase_=8 , lowerCAmelCase_=["stage1", "stage2", "stage3"] , lowerCAmelCase_=[1, 2, 3] , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = patch_norm
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = is_training
_snake_case = scope
_snake_case = use_labels
_snake_case = type_sequence_label_size
_snake_case = encoder_stride
_snake_case = out_features
_snake_case = out_indices
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = MaskFormerSwinModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ )
_snake_case = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_snake_case = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
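        # e.g. with the defaults above (image_size=32, patch_size=2, 3 stages, embed_dim=16)
        # this gives expected_seq_len = (16 ** 2) // (4 ** 2) = 16 and expected_dim = 16 * 4 = 64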
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = MaskFormerSwinBackbone(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowerCAmelCase_ ):
_snake_case = ['stem']
_snake_case = MaskFormerSwinBackbone(config=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
__lowercase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__lowercase = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = MaskFormerSwinModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self ):
"""simple docstring"""
return
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase_ )
@unittest.skip('Swin does not use inputs_embeds' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# Swin has a different seq_length
_snake_case = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
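        # e.g. with the tester defaults (image_size=32, patch_size=2) this is 16 * 16 = 256 patches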
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_snake_case = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_snake_case = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_snake_case = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_snake_case = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_snake_case = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowerCAmelCase_ ):
_snake_case = 0
return t
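                # (NaNs are zeroed on both sides first because NaN != NaN would make
                # torch.allclose fail even for outputs that are otherwise identical.)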
def check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_={} ):
with torch.no_grad():
_snake_case = model(**lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = model(**lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple()
def recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ):
if isinstance(lowerCAmelCase_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowerCAmelCase_ ) , set_nan_tensor_to_zero(lowerCAmelCase_ ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
F' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
F' {torch.isnan(lowerCAmelCase_ ).any()} and `inf`: {torch.isinf(lowerCAmelCase_ )}. Dict has'
F' `nan`: {torch.isnan(lowerCAmelCase_ ).any()} and `inf`: {torch.isinf(lowerCAmelCase_ )}.'
) , )
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_snake_case = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {'output_hidden_states': True} )
_snake_case = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_snake_case = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {'output_hidden_states': True} )
@require_torch
class __UpperCAmelCase ( unittest.TestCase , _lowerCamelCase ):
__lowercase = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__lowercase = MaskFormerSwinConfig
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = MaskFormerSwinModelTester(self )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
_snake_case = backbone_class(lowerCAmelCase_ )
backbone.to(lowerCAmelCase_ )
backbone.eval()
_snake_case = backbone(**lowerCAmelCase_ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowerCAmelCase_ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_snake_case = backbone(**lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_snake_case , _snake_case , _snake_case = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_snake_case = backbone(**lowerCAmelCase_ , output_attentions=lowerCAmelCase_ )
self.assertIsNotNone(outputs.attentions )
| 42 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def lowerCamelCase_ ( _a : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_a , _a )
def lowerCamelCase_ ( _a : Any ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = emb.weight.shape
UpperCAmelCase_ : Tuple = nn.Linear(_a , _a , bias=_a )
UpperCAmelCase_ : List[Any] = emb.weight.data
return lin_layer
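# Note: the helper above builds the LM head as a fresh nn.Linear whose weights are
# copied from the token-embedding matrix, i.e. weight tying implemented by copy
# rather than by sharing the underlying tensor.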
def lowerCamelCase_ ( _a : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = torch.load(_a , map_location="""cpu""" )
UpperCAmelCase_ : Dict = Namespace(**checkpoint["""cfg"""]["""model"""] )
UpperCAmelCase_ : Optional[int] = checkpoint["""model"""]
remove_ignore_keys_(_a )
UpperCAmelCase_ : str = state_dict["""decoder.embed_tokens.weight"""].shape[0]
UpperCAmelCase_ : List[str] = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
UpperCAmelCase_ : int = XGLMConfig(
vocab_size=_a , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
UpperCAmelCase_ : List[str] = XGLMForCausalLM(_a )
UpperCAmelCase_ : Tuple = model.load_state_dict(_a , strict=_a )
print(_a )
UpperCAmelCase_ : Optional[Any] = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
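    # Example invocation (hypothetical script name and paths, for illustration only):
    #   python convert_xglm_checkpoint.py /path/to/model.pt ./xglm-converted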
| 345 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Optional[Any] = """marian"""
a__ : str = ["""past_key_values"""]
a__ : str = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __lowercase=58_101 , __lowercase=None , __lowercase=1_024 , __lowercase=12 , __lowercase=4_096 , __lowercase=16 , __lowercase=12 , __lowercase=4_096 , __lowercase=16 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=True , __lowercase=True , __lowercase="gelu" , __lowercase=1_024 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=58_100 , __lowercase=False , __lowercase=58_100 , __lowercase=0 , __lowercase=0 , __lowercase=True , **__lowercase , ) -> Union[str, Any]:
__UpperCamelCase :List[Any] = vocab_size
__UpperCamelCase :Dict = decoder_vocab_size or vocab_size
__UpperCamelCase :Union[str, Any] = max_position_embeddings
__UpperCamelCase :List[Any] = d_model
__UpperCamelCase :Optional[int] = encoder_ffn_dim
__UpperCamelCase :Tuple = encoder_layers
__UpperCamelCase :Union[str, Any] = encoder_attention_heads
__UpperCamelCase :Dict = decoder_ffn_dim
__UpperCamelCase :Union[str, Any] = decoder_layers
__UpperCamelCase :Union[str, Any] = decoder_attention_heads
__UpperCamelCase :int = dropout
__UpperCamelCase :List[str] = attention_dropout
__UpperCamelCase :List[Any] = activation_dropout
__UpperCamelCase :Tuple = activation_function
__UpperCamelCase :Union[str, Any] = init_std
__UpperCamelCase :List[str] = encoder_layerdrop
__UpperCamelCase :List[Any] = decoder_layerdrop
__UpperCamelCase :str = use_cache
__UpperCamelCase :Dict = encoder_layers
__UpperCamelCase :Dict = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCamelCase :Union[str, Any] = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__lowercase , eos_token_id=__lowercase , is_encoder_decoder=__lowercase , decoder_start_token_id=__lowercase , forced_eos_token_id=__lowercase , **__lowercase , )
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def UpperCamelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase :Dict = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
])
if self.use_past:
__UpperCamelCase :int = {0: '''batch'''}
__UpperCamelCase :Optional[int] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__UpperCamelCase :Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
__UpperCamelCase :List[str] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__lowercase , direction='''inputs''')
elif self.task == "causal-lm":
# TODO: figure this case out.
__UpperCamelCase :List[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
])
if self.use_past:
__UpperCamelCase , __UpperCamelCase :List[str] = self.num_layers
for i in range(__lowercase):
__UpperCamelCase :int = {0: '''batch''', 2: '''past_sequence + sequence'''}
__UpperCamelCase :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__UpperCamelCase :int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
])
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def UpperCamelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase :List[str] = super().outputs
else:
__UpperCamelCase :Any = super(__lowercase , self).outputs
if self.use_past:
__UpperCamelCase , __UpperCamelCase :Dict = self.num_layers
for i in range(__lowercase):
__UpperCamelCase :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
__UpperCamelCase :Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def UpperCamelCase__ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = False , __lowercase = None , ) -> Mapping[str, Any]:
__UpperCamelCase :Dict = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
# Generate decoder inputs
__UpperCamelCase :str = seq_length if not self.use_past else 1
__UpperCamelCase :List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
__UpperCamelCase :List[Any] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
__UpperCamelCase :Union[str, Any] = dict(**__lowercase , **__lowercase)
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
__UpperCamelCase , __UpperCamelCase :List[str] = common_inputs['''input_ids'''].shape
__UpperCamelCase :Optional[Any] = common_inputs['''decoder_input_ids'''].shape[1]
__UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.num_attention_heads
__UpperCamelCase :Optional[int] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase :Optional[int] = decoder_seq_length + 3
__UpperCamelCase :Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__UpperCamelCase :Optional[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__lowercase , __lowercase)] , dim=1)
__UpperCamelCase :Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.num_layers
__UpperCamelCase :Dict = min(__lowercase , __lowercase)
__UpperCamelCase :Dict = max(__lowercase , __lowercase) - min_num_layers
__UpperCamelCase :str = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__lowercase):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowercase),
torch.zeros(__lowercase),
torch.zeros(__lowercase),
torch.zeros(__lowercase),
))
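            # (Each appended entry is assumed to be a 4-tuple per layer: self-attention
            # key/value with decoder_shape plus cross-attention key/value with
            # encoder_shape, matching the shapes built above.)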
# TODO: test this.
__UpperCamelCase :Optional[int] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__lowercase , __lowercase):
common_inputs["past_key_values"].append((torch.zeros(__lowercase), torch.zeros(__lowercase)))
return common_inputs
def UpperCamelCase__ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = False , __lowercase = None , ) -> Mapping[str, Any]:
__UpperCamelCase :int = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
__UpperCamelCase , __UpperCamelCase :Dict = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__UpperCamelCase :Tuple = seqlen + 2
__UpperCamelCase , __UpperCamelCase :List[str] = self.num_layers
__UpperCamelCase , __UpperCamelCase :Tuple = self.num_attention_heads
__UpperCamelCase :Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase :Optional[Any] = common_inputs['''attention_mask'''].dtype
__UpperCamelCase :int = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__lowercase , __lowercase , dtype=__lowercase)] , dim=1)
__UpperCamelCase :Union[str, Any] = [
(torch.zeros(__lowercase), torch.zeros(__lowercase)) for _ in range(__lowercase)
]
return common_inputs
def UpperCamelCase__ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = False , __lowercase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__UpperCamelCase :Optional[Any] = compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__UpperCamelCase :Optional[int] = tokenizer.num_special_tokens_to_add(__lowercase)
__UpperCamelCase :List[Any] = compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowercase)
# Generate dummy inputs according to compute batch and sequence
__UpperCamelCase :Any = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size
__UpperCamelCase :Optional[Any] = dict(tokenizer(__lowercase , return_tensors=__lowercase))
return common_inputs
def UpperCamelCase__ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = False , __lowercase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase :Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase)
else:
__UpperCamelCase :Dict = self._generate_dummy_inputs_for_causal_lm(
__lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase)
return common_inputs
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase :Optional[int] = super()._flatten_past_key_values_(__lowercase , __lowercase , __lowercase , __lowercase)
else:
__UpperCamelCase :List[str] = super(__lowercase , self)._flatten_past_key_values_(
__lowercase , __lowercase , __lowercase , __lowercase)
@property
def UpperCamelCase__ ( self) -> float:
return 1E-4
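# Sketch of the dynamic-axis fallback used by generate_dummy_inputs above
# (assumption: it mirrors the BART OnnxConfig defaults): a dimension of -1 is
# resolved to a small fixed size before export, e.g.
#   batch_size = -1  -> OnnxConfig.default_fixed_batch   (2 samples)
#   seq_length = -1  -> OnnxConfig.default_fixed_sequence (8 tokens)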
| 43 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Dict = embed_dim
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : str = depths
UpperCAmelCase_ : int = num_heads
UpperCAmelCase_ : List[Any] = window_size
UpperCAmelCase_ : Union[str, Any] = mlp_ratio
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = drop_path_rate
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : List[Any] = use_absolute_embeddings
UpperCAmelCase_ : List[Any] = patch_norm
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = encoder_stride
UpperCAmelCase_ : Optional[int] = out_features
UpperCAmelCase_ : Optional[int] = out_indices
def A__ ( self: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Tuple:
return FocalNetConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A__ : Optional[Any] = False
A__ : Any = False
A__ : List[str] = False
A__ : Any = False
A__ : Any = False
def A__ ( self: List[str] ) -> Tuple:
UpperCAmelCase_ : Dict = FocalNetModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ )
def A__ ( self: List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: List[str] ) -> Union[str, Any]:
return
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
pass
def A__ ( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# FocalNet has a different seq_length
UpperCAmelCase_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : List[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
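        # Pad height and width up to the next multiple of the patch size. Note that
        # when a dimension is already divisible this arithmetic still adds one full
        # patch; that matches the original test and is left unchanged here.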
UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
@slow
def A__ ( self: Optional[int] ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Optional[int] ) -> str:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
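        # Class index 281 is "tabby, tabby cat" (assumed from the standard
        # ImageNet-1k label map).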
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,281 )
@require_torch
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A__ : int = FocalNetConfig
A__ : List[str] = False
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : str = FocalNetModelTester(self )
| 345 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_a : Dict = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ['BeitFeatureExtractor']
_a : Dict = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
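# Usage sketch (assumes this file is importable as transformers.models.beit):
# _LazyModule resolves attributes on first access, so the torch/flax backends are
# only imported when actually needed, e.g.
#   from transformers.models import beit
#   cls = beit.BeitModel   # modeling_beit is imported here, not at package import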
| 44 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : List[str] = num_heads
UpperCAmelCase_ : int = window_size
UpperCAmelCase_ : List[str] = mlp_ratio
UpperCAmelCase_ : Tuple = qkv_bias
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : int = use_absolute_embeddings
UpperCAmelCase_ : Any = patch_norm
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[str] = encoder_stride
def A__ ( self: Any ) -> int:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Union[str, Any]:
return SwinvaConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str:
UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: str ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
A__ : Optional[Any] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
A__ : List[Any] = False
A__ : Tuple = False
A__ : int = False
A__ : Union[str, Any] = False
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = SwinvaModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 )
def A__ ( self: Optional[int] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: Any ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A__ ( self: Tuple ) -> List[str]:
pass
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = True
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[Any] = outputs.attentions
UpperCAmelCase_ : List[str] = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Optional[Any] = config.window_size**2
UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ )
# Check attention is always last and order is fine
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
if hasattr(self.model_tester ,"""num_hidden_states_types""" ):
UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase_ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[str] = outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# Swinv2 has a different seq_length
UpperCAmelCase_ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape
UpperCAmelCase_ : Optional[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = 3
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : List[str] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def A__ ( self: str ) -> Tuple:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Dict ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
lowerCamelCase_ )
UpperCAmelCase_ : Any = self.default_image_processor
UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
| 345 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str]=0 ) -> str:
return sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : x[column] )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str]=float('''inf''' ) ) -> Any:
for i in range(points_counts - 1 ):
for j in range(i + 1 , lowerCAmelCase__ ):
__a = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__a = current_dis
return min_dis
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=float('''inf''' ) ) -> Union[str, Any]:
for i in range(min(6 , points_counts - 1 ) , lowerCAmelCase__ ):
for j in range(max(0 , i - 6 ) , lowerCAmelCase__ ):
__a = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__a = current_dis
return min_dis
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] ) -> List[str]:
# base case
if points_counts <= 3:
return dis_between_closest_pair(lowerCAmelCase__ , lowerCAmelCase__ )
# recursion
__a = points_counts // 2
__a = closest_pair_of_points_sqr(
lowerCAmelCase__ , points_sorted_on_y[:mid] , lowerCAmelCase__ )
__a = closest_pair_of_points_sqr(
lowerCAmelCase__ , points_sorted_on_y[mid:] , points_counts - mid )
__a = min(lowerCAmelCase__ , lowerCAmelCase__ )
__a = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(lowerCAmelCase__ )
__a = dis_between_closest_in_strip(
lowerCAmelCase__ , len(lowerCAmelCase__ ) , lowerCAmelCase__ )
return min(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ) -> List[Any]:
__a = column_based_sort(lowerCAmelCase__ , column=0 )
__a = column_based_sort(lowerCAmelCase__ , column=1 )
return (
closest_pair_of_points_sqr(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
) ** 0.5
if __name__ == "__main__":
lowercase_ = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 45 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16384,
}
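# i.e. the LED checkpoints ship positional embeddings for encoder inputs of up to 16384 tokens.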
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings. Bytes that would
    render as whitespace/control characters are remapped above 2**8 so that byte-level
    BPE can operate on fully reversible strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
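
# Illustrative: otherwise-invisible bytes get printable stand-ins, e.g. the space byte:
#     bytes_to_unicode()[ord(" ")] == "Ġ"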
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    """Constructs a LED tokenizer, derived from the GPT-2 tokenizer, using byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Always apply the lowest-ranked (earliest learned) merge first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
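
    # Illustrative trace (hypothetical ranks): with bpe_ranks {("l", "o"): 0, ("lo", "w"): 1},
    # bpe("low") merges ("l", "o", "w") -> ("lo", "w") -> ("low",) and returns "low".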
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
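
# Minimal usage sketch (assumes network access to the published checkpoint):
#
#     tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     enc = tokenizer("a long document ...")
#     enc["global_attention_mask"] = [0] * len(enc["input_ids"])
#     enc["global_attention_mask"][0] = 1  # global attention on the <s> token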
| 345 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
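
# To run just this suite (illustrative; the test-file path depends on the checkout):
#     python -m pytest -k CLIPProcessorTest tests/models/clip/test_processor_clip.py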
| 46 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
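
# The sld_* arguments exercised above are the knobs of Safe Latent Diffusion:
# sld_guidance_scale=0 disables safety guidance entirely, while the "strong"
# configuration used by these tests is, as a sketch:
#     pipe(prompt, sld_guidance_scale=2000, sld_warmup_steps=7,
#          sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7)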
| 345 | 0 |
"""Harris corner detector using the classic response r = det(M) - k * trace(M)^2."""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant, expected to be 0.04 or 0.06
        window_size : size of the neighbourhood considered around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # note: this source fixes k locally rather than using self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 47 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model: it expects both MLM labels and a next-sentence label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        # (e.g. expected 1.0e8 against an output of 1.00005e8 gives a ratio of ~0.99995, well inside 1 +/- 1e-3)
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| 345 | 0 |
import argparse
import os
import jax as jnp
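# Note: this aliases the top-level jax module (not jax.numpy) as "jnp"; the only
# use below, jnp.tree_util.tree_map, therefore resolves to jax.tree_util.tree_map.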
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
lowerCamelCase : List[str] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_SCREAMING_SNAKE_CASE )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowerCamelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCamelCase : Dict = ly_weight["attention"]
lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
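
# Note on the ".T" calls above: Flax stores linear kernels as (in_features, out_features),
# while torch.nn.Linear keeps its weight as (out_features, in_features), hence the transpose.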
def load_continuous_encoder(weights, model):
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
lowerCamelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_SCREAMING_SNAKE_CASE )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowerCamelCase : Union[str, Any] = ly_weight["attention"]
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder(weights, model):
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[Any] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowerCamelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
lowerCamelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
lowerCamelCase : Optional[Any] = ly_weight["self_attention"]
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase : str = ly_weight["MultiHeadDotProductAttention_0"]
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase : str = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
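# Example invocation (filename and paths are illustrative, not part of the original script):
#   python convert_music_spectrogram_to_diffusers.py --checkpoint_path /path/to/checkpoint_500000 --output_path ./converted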
| 48 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: str ) -> int:
UpperCAmelCase_ : List[Any] = """ylacombe/bark-small"""
UpperCAmelCase_ : Tuple = tempfile.mkdtemp()
UpperCAmelCase_ : Union[str, Any] = """en_speaker_1"""
UpperCAmelCase_ : Optional[Any] = """This is a test string"""
UpperCAmelCase_ : int = """speaker_embeddings_path.json"""
UpperCAmelCase_ : Any = """speaker_embeddings"""
def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ )
def A__ ( self: str ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A__ ( self: List[Any] ) -> int:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def A__ ( self: List[Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
UpperCAmelCase_ : Optional[int] = 35
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Dict = 8
UpperCAmelCase_ : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
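        # A Bark voice preset bundles a 1-D semantic prompt with 2-D coarse and fine
        # prompts whose leading dimension is the number of codebooks.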
# test providing already loaded voice_preset
UpperCAmelCase_ : str = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,"""file.npz""" )
np.savez(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : int = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Union[str, Any] = processor(text=self.input_string ,voice_preset=self.voice_preset )
def A__ ( self: Dict ) -> Tuple:
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Dict = BarkProcessor(tokenizer=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string )
UpperCAmelCase_ : str = tokenizer(
self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
| 345 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class _A ( unittest.TestCase ):
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Optional[int]=18 , __SCREAMING_SNAKE_CASE : str=30 , __SCREAMING_SNAKE_CASE : List[str]=400 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Optional[int]=True , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size_divisor
__a = do_rescale
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
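# GLPNImageProcessor resizes inputs so that both spatial dimensions are multiples of
# size_divisor; the shape assertions in the tests below rely on this behaviour.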
@require_torch
@require_vision
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Tuple = GLPNImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = GLPNImageProcessingTester(self)
@property
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size_divisor'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''resample'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_rescale'''))
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
pass
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image)
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray)
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 49 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : str = -1
UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : Optional[int] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Dict ) -> Optional[Any]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] )
UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ )
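        # TextIteratorStreamer exposes generated text through a blocking iterator,
        # so generation has to run in a background thread while this thread consumes it.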
UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
UpperCAmelCase_ : int = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :]
UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : List[str] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: str ) -> str:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Any = -1
UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = -1
UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
        UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.001 )
UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = """"""
for new_text in streamer:
streamer_text += new_text
| 345 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
class lowerCAmelCase ( nn.Module ):
def __init__( self : Tuple , UpperCAmelCase : Dict=3 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Optional[Any]=("DownEncoderBlock2D",) , UpperCAmelCase : List[str]=(64,) , UpperCAmelCase : Any=2 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : List[str]="silu" , UpperCAmelCase : Tuple=True , ) -> Tuple:
super().__init__()
lowerCamelCase__ : Optional[Any] = layers_per_block
lowerCamelCase__ : Dict = torch.nn.Convad(
UpperCAmelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Union[str, Any] = nn.ModuleList([] )
# down
lowerCamelCase__ : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(UpperCAmelCase ):
lowerCamelCase__ : List[Any] = output_channel
lowerCamelCase__ : Dict = block_out_channels[i]
lowerCamelCase__ : int = i == len(UpperCAmelCase ) - 1
lowerCamelCase__ : int = get_down_block(
UpperCAmelCase , num_layers=self.layers_per_block , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCAmelCase , resnet_groups=UpperCAmelCase , attention_head_dim=UpperCAmelCase , temb_channels=UpperCAmelCase , )
self.down_blocks.append(UpperCAmelCase )
# mid
lowerCamelCase__ : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase , temb_channels=UpperCAmelCase , )
# out
lowerCamelCase__ : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCAmelCase , eps=1e-6 )
lowerCamelCase__ : List[Any] = nn.SiLU()
lowerCamelCase__ : List[str] = 2 * out_channels if double_z else out_channels
lowerCamelCase__ : Any = nn.Convad(block_out_channels[-1] , UpperCAmelCase , 3 , padding=1 )
lowerCamelCase__ : List[Any] = False
def A_ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
lowerCamelCase__ : Dict = x
lowerCamelCase__ : Dict = self.conv_in(UpperCAmelCase )
if self.training and self.gradient_checkpointing:
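            # Gradient checkpointing trades compute for memory: activations inside each
            # block are recomputed during the backward pass instead of being stored.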
def create_custom_forward(UpperCAmelCase : Dict ):
def custom_forward(*UpperCAmelCase : Any ):
return module(*UpperCAmelCase )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
lowerCamelCase__ : Optional[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase ) , UpperCAmelCase , use_reentrant=UpperCAmelCase )
# middle
lowerCamelCase__ : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase , use_reentrant=UpperCAmelCase )
else:
for down_block in self.down_blocks:
lowerCamelCase__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase ) , UpperCAmelCase )
# middle
lowerCamelCase__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCAmelCase )
else:
# down
for down_block in self.down_blocks:
lowerCamelCase__ : Optional[Any] = down_block(UpperCAmelCase )
# middle
lowerCamelCase__ : List[str] = self.mid_block(UpperCAmelCase )
# post-process
lowerCamelCase__ : str = self.conv_norm_out(UpperCAmelCase )
lowerCamelCase__ : List[Any] = self.conv_act(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = self.conv_out(UpperCAmelCase )
return sample
class lowerCAmelCase ( nn.Module ):
def __init__( self : Any , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : int=("UpDecoderBlock2D",) , UpperCAmelCase : Optional[Any]=(64,) , UpperCAmelCase : str=2 , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : int="silu" , UpperCAmelCase : List[str]="group" , ) -> Any:
super().__init__()
lowerCamelCase__ : List[str] = layers_per_block
lowerCamelCase__ : Optional[Any] = nn.Convad(
UpperCAmelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowerCamelCase__ : Dict = None
lowerCamelCase__ : List[str] = nn.ModuleList([] )
lowerCamelCase__ : Dict = in_channels if norm_type == 'spatial' else None
# mid
lowerCamelCase__ : List[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase , temb_channels=UpperCAmelCase , )
# up
lowerCamelCase__ : Tuple = list(reversed(UpperCAmelCase ) )
lowerCamelCase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Dict = output_channel
lowerCamelCase__ : List[Any] = reversed_block_out_channels[i]
lowerCamelCase__ : Union[str, Any] = i == len(UpperCAmelCase ) - 1
lowerCamelCase__ : Union[str, Any] = get_up_block(
UpperCAmelCase , num_layers=self.layers_per_block + 1 , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase , resnet_groups=UpperCAmelCase , attention_head_dim=UpperCAmelCase , temb_channels=UpperCAmelCase , resnet_time_scale_shift=UpperCAmelCase , )
self.up_blocks.append(UpperCAmelCase )
lowerCamelCase__ : str = output_channel
# out
if norm_type == "spatial":
lowerCamelCase__ : Any = SpatialNorm(block_out_channels[0] , UpperCAmelCase )
else:
lowerCamelCase__ : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCAmelCase , eps=1e-6 )
lowerCamelCase__ : Tuple = nn.SiLU()
lowerCamelCase__ : Union[str, Any] = nn.Convad(block_out_channels[0] , UpperCAmelCase , 3 , padding=1 )
lowerCamelCase__ : Dict = False
def A_ ( self : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Any=None ) -> List[str]:
lowerCamelCase__ : Any = z
lowerCamelCase__ : Union[str, Any] = self.conv_in(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase : Any ):
def custom_forward(*UpperCAmelCase : Any ):
return module(*UpperCAmelCase )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
lowerCamelCase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase , UpperCAmelCase , use_reentrant=UpperCAmelCase )
lowerCamelCase__ : Tuple = sample.to(UpperCAmelCase )
# up
for up_block in self.up_blocks:
lowerCamelCase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , use_reentrant=UpperCAmelCase )
else:
# middle
lowerCamelCase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Dict = sample.to(UpperCAmelCase )
# up
for up_block in self.up_blocks:
lowerCamelCase__ : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase )
else:
# middle
lowerCamelCase__ : Any = self.mid_block(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[int] = sample.to(UpperCAmelCase )
# up
for up_block in self.up_blocks:
lowerCamelCase__ : List[Any] = up_block(UpperCAmelCase , UpperCAmelCase )
# post-process
if latent_embeds is None:
lowerCamelCase__ : str = self.conv_norm_out(UpperCAmelCase )
else:
lowerCamelCase__ : Optional[Any] = self.conv_norm_out(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : List[str] = self.conv_act(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.conv_out(UpperCAmelCase )
return sample
class lowerCAmelCase ( nn.Module ):
def __init__( self : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]="random" , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : List[Any]=True ) -> Optional[int]:
super().__init__()
lowerCamelCase__ : int = n_e
lowerCamelCase__ : Any = vq_embed_dim
lowerCamelCase__ : Optional[int] = beta
lowerCamelCase__ : Optional[Any] = legacy
lowerCamelCase__ : Any = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowerCamelCase__ : Any = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
lowerCamelCase__ : str = self.used.shape[0]
lowerCamelCase__ : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowerCamelCase__ : List[Any] = self.re_embed
lowerCamelCase__ : List[Any] = self.re_embed + 1
print(
F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
F"""Using {self.unknown_index} for unknown indices.""" )
else:
lowerCamelCase__ : List[Any] = n_e
lowerCamelCase__ : Optional[Any] = sane_index_shape
def A_ ( self : Union[str, Any] , UpperCAmelCase : Any ) -> Dict:
lowerCamelCase__ : Any = inds.shape
assert len(UpperCAmelCase ) > 1
lowerCamelCase__ : str = inds.reshape(ishape[0] , -1 )
lowerCamelCase__ : str = self.used.to(UpperCAmelCase )
lowerCamelCase__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
lowerCamelCase__ : Optional[int] = match.argmax(-1 )
lowerCamelCase__ : Dict = match.sum(2 ) < 1
if self.unknown_index == "random":
lowerCamelCase__ : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowerCamelCase__ : int = self.unknown_index
return new.reshape(UpperCAmelCase )
def A_ ( self : List[str] , UpperCAmelCase : Tuple ) -> Optional[int]:
lowerCamelCase__ : List[Any] = inds.shape
assert len(UpperCAmelCase ) > 1
lowerCamelCase__ : List[Any] = inds.reshape(ishape[0] , -1 )
lowerCamelCase__ : str = self.used.to(UpperCAmelCase )
if self.re_embed > self.used.shape[0]: # extra token
lowerCamelCase__ : Any = 0 # simply set to zero
lowerCamelCase__ : Any = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCAmelCase )
return back.reshape(UpperCAmelCase )
def A_ ( self : Tuple , UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
lowerCamelCase__ : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowerCamelCase__ : List[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
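        # torch.cdist yields the pairwise Euclidean distance matrix between the flattened
        # latents and all codebook vectors; argmin then selects the nearest codebook entry.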
lowerCamelCase__ : List[str] = torch.argmin(torch.cdist(UpperCAmelCase , self.embedding.weight ) , dim=1 )
lowerCamelCase__ : Optional[Any] = self.embedding(UpperCAmelCase ).view(z.shape )
lowerCamelCase__ : int = None
lowerCamelCase__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
lowerCamelCase__ : Dict = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowerCamelCase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
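        # Straight-through estimator: the forward pass uses the quantized values while
        # gradients bypass the non-differentiable argmin and flow directly into z.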
lowerCamelCase__ : Union[str, Any] = z + (z_q - z).detach()
# reshape back to match original input shape
lowerCamelCase__ : Any = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowerCamelCase__ : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowerCamelCase__ : Union[str, Any] = self.remap_to_used(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowerCamelCase__ : Any = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def A_ ( self : int , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] ) -> List[Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowerCamelCase__ : str = indices.reshape(shape[0] , -1 ) # add batch axis
lowerCamelCase__ : Optional[Any] = self.unmap_to_all(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowerCamelCase__ : str = self.embedding(UpperCAmelCase )
if shape is not None:
lowerCamelCase__ : List[str] = z_q.view(UpperCAmelCase )
# reshape back to match original input shape
lowerCamelCase__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class lowerCAmelCase ( __UpperCamelCase ):
def __init__( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=False ) -> List[str]:
lowerCamelCase__ : List[str] = parameters
lowerCamelCase__ , lowerCamelCase__ : List[str] = torch.chunk(UpperCAmelCase , 2 , dim=1 )
        lowerCamelCase__ : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowerCamelCase__ : List[Any] = deterministic
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * self.logvar )
lowerCamelCase__ : Optional[Any] = torch.exp(self.logvar )
if self.deterministic:
lowerCamelCase__ : str = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def A_ ( self : List[str] , UpperCAmelCase : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
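        # Reparameterization trick: x = mean + std * eps with eps ~ N(0, I), which keeps
        # the sampling step differentiable with respect to mean and std.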
lowerCamelCase__ : Any = randn_tensor(
self.mean.shape , generator=UpperCAmelCase , device=self.parameters.device , dtype=self.parameters.dtype )
lowerCamelCase__ : Optional[int] = self.mean + self.std * sample
return x
def A_ ( self : Dict , UpperCAmelCase : Optional[Any]=None ) -> Optional[Any]:
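        # Closed-form KL divergence between diagonal Gaussians; against N(0, I) it
        # reduces to 0.5 * sum(mean^2 + var - 1 - logvar) per example.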
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def A_ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[Any]=[1, 2, 3] ) -> List[str]:
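        # Gaussian negative log-likelihood of `sample` under this distribution:
        # 0.5 * sum(log(2*pi) + logvar + (sample - mean)^2 / var) over `dims`.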
if self.deterministic:
return torch.Tensor([0.0] )
lowerCamelCase__ : int = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCAmelCase )
def A_ ( self : Optional[int] ) -> List[str]:
return self.mean
| 50 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
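# Full determinism pins the CUDA/cuDNN kernels so the pixel-level slice
# comparisons below are reproducible across runs.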
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
@property
def A__ ( self: Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,)
return model
@property
def A__ ( self: Tuple ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
def A__ ( self: str ) -> Optional[Any]:
UpperCAmelCase_ : str = self.dummy_uncond_unet
UpperCAmelCase_ : List[Any] = DDIMScheduler()
UpperCAmelCase_ : List[Any] = self.dummy_vq_model
UpperCAmelCase_ : Optional[int] = LDMPipeline(unet=lowerCamelCase_ ,vqvae=lowerCamelCase_ ,scheduler=lowerCamelCase_ )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.manual_seed(0 )
UpperCAmelCase_ : int = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ).images
UpperCAmelCase_ : List[str] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=lowerCamelCase_ )[0]
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase_ : str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase_ : Tuple = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ldm(generator=lowerCamelCase_ ,num_inference_steps=5 ,output_type="""numpy""" ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
        UpperCAmelCase_ : int = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
UpperCAmelCase_ : Union[str, Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 345 | 0 |
def A (__A : int ) -> int:
    """
    Count the set bits (1-bits) in the binary representation of a positive integer.

    >>> A(25)
    3
    >>> A(36)
    2
    """
    if __A < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(__A , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return bin(__A ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51 |
def topological_sort ( graph : dict ):
    '''Kahn's algorithm: print a topological order of the DAG, or report a cycle.'''
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print("""Cycle exists""" )
    else:
        print(topo )
# Adjacency List of Graph
UpperCamelCase_ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
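# For the DAG above, Kahn's algorithm prints the topological order [0, 1, 2, 3, 4, 5].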
| 345 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class A__ ( __snake_case ):
_UpperCAmelCase :Optional[float] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
_UpperCAmelCase :bool = field(default=__snake_case , metadata={'help': 'Whether to SortishSamler or not.'} )
_UpperCAmelCase :bool = field(
default=__snake_case , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
_UpperCAmelCase :bool = field(default=__snake_case , metadata={'help': 'whether to use adafactor'} )
_UpperCAmelCase :Optional[float] = field(
default=__snake_case , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
_UpperCAmelCase :Optional[float] = field(
default=__snake_case , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
_UpperCAmelCase :Optional[float] = field(default=__snake_case , metadata={'help': 'Dropout probability. Goes into model.config.'} )
_UpperCAmelCase :Optional[float] = field(
default=__snake_case , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
_UpperCAmelCase :Optional[str] = field(
default='linear' , metadata={'help': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
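    # `arg_to_scheduler` (imported above) maps scheduler names such as "linear" or
    # "cosine" to their factory functions; the default here selects the linear schedule.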
| 52 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "swinv2"
A__ : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: List[str] ,lowerCamelCase_: List[str]=224 ,lowerCamelCase_: List[str]=4 ,lowerCamelCase_: List[Any]=3 ,lowerCamelCase_: Optional[Any]=96 ,lowerCamelCase_: Any=[2, 2, 6, 2] ,lowerCamelCase_: Dict=[3, 6, 12, 24] ,lowerCamelCase_: str=7 ,lowerCamelCase_: Optional[Any]=4.0 ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: str=False ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: Union[str, Any]=1e-5 ,lowerCamelCase_: str=32 ,**lowerCamelCase_: List[str] ,) -> Tuple:
super().__init__(**lowerCamelCase_ )
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Tuple = patch_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : List[Any] = embed_dim
UpperCAmelCase_ : Dict = depths
UpperCAmelCase_ : Dict = len(lowerCamelCase_ )
UpperCAmelCase_ : str = num_heads
UpperCAmelCase_ : Tuple = window_size
UpperCAmelCase_ : int = mlp_ratio
UpperCAmelCase_ : str = qkv_bias
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : int = drop_path_rate
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : List[str] = use_absolute_embeddings
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ : List[str] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
UpperCAmelCase_ : Any = (0, 0, 0, 0)
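        # Presumably Swinv2's per-stage pretrained window sizes; (0, 0, 0, 0) means no
        # pretrained window size is carried over for any of the four stages.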
| 345 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
a__ : Optional[int] ={
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
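# Lazy-module pattern: the heavy torch-backed classes below are only registered when
# torch is importable; otherwise the package exposes just the config and tokenizer.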
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str =[
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
a__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 53 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: int ) -> str:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : List[str] = mock.Mock()
UpperCAmelCase_ : List[Any] = 500
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self: str ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : str = mock.Mock()
UpperCAmelCase_ : Optional[int] = 500
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Union[str, Any] = HTTPError
UpperCAmelCase_ : List[Any] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head:
UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# This check we did call the fake head request
mock_head.assert_called()
def A__ ( self: str ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
try:
UpperCAmelCase_ : Any = tempfile.mktemp()
with open(lowerCamelCase_ ,"""wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ )
finally:
os.remove(lowerCamelCase_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" ,"""wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def A__ ( self: List[str] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls: Dict ) -> Optional[int]:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def A__ ( cls: Optional[Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def A__ ( self: Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def A__ ( self: Optional[int] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def A__ ( self: Optional[int] ) -> Optional[Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Any = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
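        # The Trie stores one nested dict per character; an empty-string key with
        # value 1 marks the end of a registered token, as the assertion above shows.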
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def A__ ( self: Tuple ) -> Optional[int]:
UpperCAmelCase_ : str = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def A__ ( self: int ) -> List[str]:
UpperCAmelCase_ : int = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )
def A__ ( self: List[Any] ) -> Any:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCAmelCase_ : Tuple = Trie()
UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
| 345 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    '''Entry point for the `accelerate` command-line tool.'''
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
| 54 |
from ..utils import DummyObject, requires_backends
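# Placeholder classes: each one raises an informative error via requires_backends
# when instantiated or loaded while the Flax backend is not installed.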
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
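# --- Editorial sketch (appended; not part of the generated file above) ------
# Each placeholder class routes __init__ and its two classmethods (renamed to
# `A__` by the dataset's transform; in the real transformers file they are
# `from_config` and `from_pretrained`) into `requires_backends`, so touching
# the class without Flax installed raises an ImportError. Below is a minimal,
# self-contained stand-in for that helper, an illustrative approximation of
# `transformers.utils.requires_backends`, not a copy of it:
import importlib.util

def _requires_backends_sketch(obj, backends):
    # Collect every backend whose import spec cannot be found.
    missing = [name for name in backends if importlib.util.find_spec(name) is None]
    if missing:
        owner = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{owner} requires the following backends: {', '.join(missing)}")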
| 345 | 0 |
'''simple docstring'''
def binary_recursive ( UpperCAmelCase_ : int ):
    # Peel off the least-significant bit with divmod and recurse on the rest.
    decimal = int(UpperCAmelCase_ )
    if decimal in (0, 1): # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def decimal_to_binary ( UpperCAmelCase_ : str ):
    # Wrapper name assigned editorially: the transform had renamed both defs
    # in this file to the same identifier, so the second shadowed the first.
    number = str(UpperCAmelCase_ ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return F'''{negative}0b{binary_recursive(int(number ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
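    # --- Editorial usage check (appended): exercises the two functions above.
    # Their names were restored/assigned editorially, so treat them as this
    # file's names rather than necessarily the upstream originals.
    assert binary_recursive(10) == "1010"
    assert decimal_to_binary(" -37 ") == "-0b100101"
    assert decimal_to_binary("0") == "0b0"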
| 55 |
import random
from typing import Any
def fisher_yates_shuffle ( _a : list ):
    '''simple docstring'''
    # Classic Fisher-Yates: walk from the last index down to 1 and swap each
    # element with a uniformly chosen element at or below it. The previous
    # body swapped two independent random indices per pass, which is a
    # random-transposition shuffle, not Fisher-Yates, and is biased.
    for i in range(len(_a ) - 1 , 0 , -1 ):
        j = random.randint(0 , i )
        _a[i], _a[j] = _a[j], _a[i]
    return _a
if __name__ == "__main__":
UpperCamelCase_ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase_ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 345 | 0 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a : Tuple = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
a : Optional[int] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
a : Optional[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def A_ ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def A_ ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : str=None , lowercase_ : Dict=None , lowercase_ : int=None , lowercase_ : str=None , lowercase_ : Any="auto" , lowercase_ : Union[str, Any]=-1 , lowercase_ : Dict=0.9 , lowercase_ : Optional[int]=5 , lowercase_ : Tuple=500 , lowercase_ : Union[str, Any]="gpt2-large" , lowercase_ : Optional[int]=-1 , lowercase_ : Any=1024 , lowercase_ : List[str]=25 , lowercase_ : Any=5 , lowercase_ : Union[str, Any]=True , lowercase_ : str=25 , ):
snake_case_ = compute_mauve(
p_text=lowercase_ , q_text=lowercase_ , p_features=lowercase_ , q_features=lowercase_ , p_tokens=lowercase_ , q_tokens=lowercase_ , num_buckets=lowercase_ , pca_max_data=lowercase_ , kmeans_explained_var=lowercase_ , kmeans_num_redo=lowercase_ , kmeans_max_iter=lowercase_ , featurize_model_name=lowercase_ , device_id=lowercase_ , max_text_length=lowercase_ , divergence_curve_discretization_size=lowercase_ , mauve_scaling_factor=lowercase_ , verbose=lowercase_ , seed=lowercase_ , )
return out
| 56 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> int:
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: str ) -> Any:
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : int
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : bool = True
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class _snake_case ( nn.Module ):
'''simple docstring'''
A__ : int
A__ : float = 0.0
A__ : int = 1
A__ : int = 1
A__ : bool = False
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
def A__ ( self: Dict ) -> List[str]:
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
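# --- Editorial sketch (appended): the skip-connection bookkeeping shared by
# the blocks above. Down blocks append every intermediate hidden state to
# `output_states`; up blocks pop from the *end* of that tuple and concatenate
# along the channel axis (last axis in Flax's NHWC layout) before each resnet,
# which is why `resnet_in_channels + res_skip_channels` sizes the resnet
# inputs. Shapes-only demo, runnable without building a model:
if __name__ == "__main__":
    hidden = jnp.zeros((1, 8, 8, 64))         # (batch, H, W, C) on the up path
    res_states = (jnp.zeros((1, 8, 8, 32)),)  # matching skip from the down pass
    skip = res_states[-1]
    res_states = res_states[:-1]
    merged = jnp.concatenate((hidden, skip), axis=-1)
    assert merged.shape == (1, 8, 8, 96)      # channels add: 64 + 32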
| 345 | 0 |
"""simple docstring"""
from math import ceil
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = list(range(0 , _UpperCamelCase ) )
__lowerCAmelCase = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
__lowerCAmelCase = []
for i in device_map_blocks:
if device_map_blocks.count(_UpperCamelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_UpperCamelCase )
# Missing blocks
__lowerCAmelCase = [i for i in blocks if i not in device_map_blocks]
__lowerCAmelCase = [i for i in device_map_blocks if i not in blocks]
if len(_UpperCamelCase ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(_UpperCamelCase ) )
if len(_UpperCamelCase ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(_UpperCamelCase ) )
if len(_UpperCamelCase ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(_UpperCamelCase ) )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = list(range(_UpperCamelCase ) )
__lowerCAmelCase = int(ceil(n_layers / len(_UpperCamelCase ) ) )
__lowerCAmelCase = [layers[i : i + n_blocks] for i in range(0 , _UpperCamelCase , _UpperCamelCase )]
return dict(zip(_UpperCamelCase , _UpperCamelCase ) )
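# --- Editorial sketch (appended): what the second helper above computes.
# Both defs were renamed to the same identifier by the dataset's transform, so
# the layer split is re-derived here independently: ceil(n_layers / n_devices)
# consecutive layer indices land on each device id.
if __name__ == "__main__":
    n_layers, devices = 12, [0, 1, 2, 3]
    n_blocks = int(ceil(n_layers / len(devices)))
    layers = list(range(n_layers))
    blocks = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    assert dict(zip(devices, blocks)) == {
        0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11],
    }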
| 57 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]:
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : Optional[int] = conva_get[:2]
UpperCAmelCase_ : List[Any] = conva_get[2]
UpperCAmelCase_ : str = size_pa
UpperCAmelCase_ : Optional[int] = rate_w
UpperCAmelCase_ : Dict = rate_t
UpperCAmelCase_ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# modify model parameter
UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : int = model_dic.get("""wkj""" )
UpperCAmelCase_ : int = model_dic.get("""vji""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple:
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any:
# convolution process
UpperCAmelCase_ : Optional[Any] = convs[0]
UpperCAmelCase_ : int = convs[1]
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Dict = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
        # calculate the feature map for every kernel and save them as a list of matrices
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
        # expand each data slice to one dimension
UpperCAmelCase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
UpperCAmelCase_ : Any = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[int] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
        # flatten three-dimensional data into a one-dimensional list
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Tuple = np.shape(data[i] )
UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
        # flatten a matrix into a one-dimensional list
UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = pd_pool[
i_pool
]
UpperCAmelCase_ : List[str] = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
    def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: bool=True ) -> Optional[int]:
        # model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = data_bp_input
UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[str] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[Any] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : int = rp + 1
UpperCAmelCase_ : Any = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
# model predict
UpperCAmelCase_ : Union[str, Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : str = data_bp_input
UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
        # return the image data after the convolution step so it can be inspected
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
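    # --- Editorial sketch (appended): the shape arithmetic convolute() and
    # pooling() rely on, checked standalone. A valid convolution yields
    # (size_in - kernel) // stride + 1 per side; average pooling then divides
    # the side length by the pooling window.
    size_data, size_conv, conv_step, size_pooling = 28, 5, 1, 2
    size_feature_map = (size_data - size_conv) // conv_step + 1
    assert size_feature_map == 24
    assert size_feature_map // size_pooling == 12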
| 345 | 0 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
lowercase_ = True
except ImportError:
lowercase_ = False
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase ( __lowerCamelCase : Namespace ) ->Optional[Any]:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
def snake_case_( A ) -> Tuple:
_SCREAMING_SNAKE_CASE = parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=A , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=A , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=A )
def __init__( self , A , A , A=None , *A ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = testing
_SCREAMING_SNAKE_CASE = testing_file
_SCREAMING_SNAKE_CASE = path
def snake_case_( self ) -> List[str]:
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
_SCREAMING_SNAKE_CASE = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
if len(A ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
_SCREAMING_SNAKE_CASE = (
Path(A ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
_SCREAMING_SNAKE_CASE = path_to_transformer_root / """templates""" / """adding_a_new_model"""
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
_SCREAMING_SNAKE_CASE = json.load(A )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=A , extra_context=A , )
_SCREAMING_SNAKE_CASE = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
_SCREAMING_SNAKE_CASE = json.load(A )
_SCREAMING_SNAKE_CASE = configuration["""lowercase_modelname"""]
_SCREAMING_SNAKE_CASE = configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(f'{directory}/configuration.json' )
_SCREAMING_SNAKE_CASE = """PyTorch""" in generate_tensorflow_pytorch_and_flax
_SCREAMING_SNAKE_CASE = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
_SCREAMING_SNAKE_CASE = """Flax""" in generate_tensorflow_pytorch_and_flax
_SCREAMING_SNAKE_CASE = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A , exist_ok=A )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=A )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , """w""" ):
pass
shutil.move(
f'{directory}/__init__.py' , f'{model_dir}/__init__.py' , )
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' , f'{model_dir}/configuration_{lowercase_model_name}.py' , )
def remove_copy_lines(A ):
with open(A , """r""" ) as f:
_SCREAMING_SNAKE_CASE = f.readlines()
with open(A , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' , f'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' , f'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' , f'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' , f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A , A , A ):
# Create temp file
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = mkstemp()
_SCREAMING_SNAKE_CASE = False
with fdopen(A , """w""" ) as new_file:
with open(A ) as old_file:
for line in old_file:
new_file.write(A )
if line_to_copy_below in line:
_SCREAMING_SNAKE_CASE = True
for line_to_copy in lines_to_copy:
new_file.write(A )
if not line_found:
raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A , A )
# Remove original file
remove(A )
# Move new file
move(A , A )
def skip_units(A ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A ):
with open(A ) as datafile:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
_SCREAMING_SNAKE_CASE = line.split("""\"""" )[1]
_SCREAMING_SNAKE_CASE = skip_units(A )
elif "# Below: " in line and "##" not in line:
_SCREAMING_SNAKE_CASE = line.split("""\"""" )[1]
_SCREAMING_SNAKE_CASE = skip_units(A )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A , A , A )
_SCREAMING_SNAKE_CASE = []
elif "# Replace with" in line and "##" not in line:
_SCREAMING_SNAKE_CASE = []
elif "##" not in line:
lines_to_copy.append(A )
remove(A )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A )
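# --- Editorial sketch (appended): the sentinel-comment protocol parsed by
# `replace_in_files` above. Blocks inside `to_replace_<model>.py` follow the
# hypothetical template below (file and model names are made up): a target
# file, an anchor line to copy below, payload lines, then an end marker.
if __name__ == "__main__":
    template = (
        '# To replace in: "src/models/__init__.py"\n'
        '# Below: "# add new model imports"\n'
        "from .modeling_newmodel import NewModel\n"
        "# End.\n"
    )
    lines = template.splitlines()
    target_file = lines[0].split('"')[1]
    anchor_line = lines[1].split('"')[1]
    payload = lines[2:-1]
    assert target_file == "src/models/__init__.py"
    assert anchor_line == "# add new model imports"
    assert payload == ["from .modeling_newmodel import NewModel"]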
| 58 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Optional[Any] = CTRLTokenizer
A__ : Optional[Any] = False
A__ : str = False
def A__ ( self: Optional[int] ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: int ,lowerCamelCase_: int ) -> str:
UpperCAmelCase_ : List[str] = """adapt react readapt apt"""
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
return input_text, output_text
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
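# --- Editorial sketch (appended): a minimal greedy BPE pass that reproduces
# the expectations in the test above. This is an illustrative approximation
# of CTRL's merge loop, not the tokenizer's actual implementation.
def _bpe_sketch(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
    while len(symbols) > 1:
        # Pick the adjacent pair with the lowest merge rank, if any rule applies.
        rank, i = min(
            (ranks.get(pair, float("inf")), j)
            for j, pair in enumerate(zip(symbols, symbols[1:]))
        )
        if rank == float("inf"):
            break
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols

if __name__ == "__main__":
    merges = ["a p", "ap t</w>", "r e", "a d", "ad apt</w>"]
    assert _bpe_sketch("adapt", merges) == ["adapt</w>"]              # -> "adapt"
    assert _bpe_sketch("react", merges) == ["re", "a", "c", "t</w>"]  # re@@ a@@ c@@ t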
| 345 | 0 |
import argparse
from collections import defaultdict
import yaml
__lowerCamelCase = """docs/source/en/_toctree.yml"""
def UpperCamelCase ( __lowerCamelCase : Optional[Any] ):
snake_case : Any = defaultdict(__lowerCamelCase )
snake_case : List[Any] = []
snake_case : List[str] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]} )
else:
new_doc_list.append(__lowerCamelCase )
snake_case : List[str] = new_doc_list
snake_case : Optional[int] = [key for key, value in counts.items() if value > 1]
snake_case : Any = []
for duplicate_key in duplicates:
snake_case : List[str] = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
if len(__lowerCamelCase ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
snake_case : Union[str, Any] = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__lowerCamelCase ) > 1:
raise ValueError("{doc_list} has two 'overview' docs which is not allowed." )
overview_doc.extend(__lowerCamelCase )
# Sort
return overview_doc
def UpperCamelCase ( __lowerCamelCase : Any=False ):
with open(__lowerCamelCase , encoding="utf-8" ) as f:
snake_case : Optional[Any] = yaml.safe_load(f.read() )
# Get to the API doc
snake_case : List[str] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case : str = content[api_idx]["sections"]
# Then to the model doc
snake_case : Optional[int] = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
snake_case : Dict = api_doc[scheduler_idx]["sections"]
snake_case : Any = clean_doc_toc(__lowerCamelCase )
snake_case : str = False
if new_scheduler_doc != scheduler_doc:
snake_case : Dict = True
if overwrite:
snake_case : int = new_scheduler_doc
if diff:
if overwrite:
snake_case : Dict = api_doc
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__lowerCamelCase , allow_unicode=__lowerCamelCase ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def UpperCamelCase ( __lowerCamelCase : int=False ):
with open(__lowerCamelCase , encoding="utf-8" ) as f:
snake_case : str = yaml.safe_load(f.read() )
# Get to the API doc
snake_case : str = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case : List[Any] = content[api_idx]["sections"]
# Then to the model doc
snake_case : Tuple = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
snake_case : int = False
snake_case : Optional[int] = api_doc[pipeline_idx]["sections"]
snake_case : Any = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
snake_case : str = pipeline_doc["section"]
snake_case : Optional[int] = clean_doc_toc(__lowerCamelCase )
if overwrite:
snake_case : Any = new_sub_pipeline_doc
new_pipeline_docs.append(__lowerCamelCase )
# sort overall pipeline doc
snake_case : Tuple = clean_doc_toc(__lowerCamelCase )
if new_pipeline_docs != pipeline_docs:
snake_case : Optional[int] = True
if overwrite:
snake_case : Optional[int] = new_pipeline_docs
if diff:
if overwrite:
snake_case : int = api_doc
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__lowerCamelCase , allow_unicode=__lowerCamelCase ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCamelCase = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
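# --- Editorial sketch (appended): the normalization the cleaning helper above
# applies to each section list, shown on hypothetical data. Duplicate "local"
# keys collapse (erroring if their titles disagree), entries sort by title
# case-insensitively, and any "Overview" entry is hoisted to the front:
#
#   [{"local": "pndm", "title": "PNDM"},
#    {"local": "ddim", "title": "DDIM"},
#    {"local": "ddim", "title": "DDIM"},
#    {"local": "overview", "title": "Overview"}]
# ->
#   [{"local": "overview", "title": "Overview"},
#    {"local": "ddim", "title": "DDIM"},
#    {"local": "pndm", "title": "PNDM"}]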
| 59 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = "ernie_m"
A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout
UpperCAmelCase_ : str = is_decoder
UpperCAmelCase_ : List[str] = act_dropout
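# --- Editorial usage note (appended). The class was renamed to `_snake_case`
# by the dataset's transform; upstream it is transformers' ErnieMConfig. The
# keyword names below are inferred from the attribute assignments above (the
# parameter identifiers themselves were all renamed to `lowerCamelCase_`), so
# treat them as assumptions:
#
#   config = ErnieMConfig(vocab_size=250002, hidden_size=768,
#                         num_hidden_layers=12, num_attention_heads=12)
#
# The attribute map declared on the class aliases legacy names, e.g. reading
# `config.dropout` resolves to `classifier_dropout`, and `config.num_classes`
# to `num_labels`.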
| 345 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = KandinskyVaaImgaImgPipeline
__UpperCamelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''']
__UpperCamelCase = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
__UpperCamelCase = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__UpperCamelCase = False
@property
def lowerCamelCase__ ( self : str ):
return 3_2
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return 3_2
@property
def lowerCamelCase__ ( self : List[Any] ):
return self.time_input_dim
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return 1_0_0
@property
def lowerCamelCase__ ( self : Any ):
torch.manual_seed(0 )
lowerCAmelCase : Optional[Any] = {
'''in_channels''': 4,
            # out_channels is double in_channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCAmelCase : List[str] = UNetaDConditionModel(**UpperCamelCase_ )
return model
@property
def lowerCamelCase__ ( self : str ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__ ( self : List[Any] ):
torch.manual_seed(0 )
lowerCAmelCase : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Dict = self.dummy_unet
lowerCAmelCase : Any = self.dummy_movq
lowerCAmelCase : Dict = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowerCAmelCase : Optional[int] = DDIMScheduler(**UpperCamelCase_ )
lowerCAmelCase : Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=0 ):
lowerCAmelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
lowerCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase_ )
# create init_image
lowerCAmelCase : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
lowerCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase : Tuple = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase : Optional[Any] = torch.manual_seed(UpperCamelCase_ )
else:
lowerCAmelCase : int = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 1_0,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = '''cpu'''
lowerCAmelCase : int = self.get_dummy_components()
lowerCAmelCase : Optional[Any] = self.pipeline_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase : Any = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
lowerCAmelCase : Tuple = output.images
lowerCAmelCase : str = pipe(
**self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0]
lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
lowerCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCAmelCase : Tuple = np.array(
[0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
lowerCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowerCAmelCase : Union[str, Any] = '''A red cartoon frog, 4k'''
lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
lowerCAmelCase : Dict = pipeline.to(UpperCamelCase_ )
pipeline.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase, lowerCAmelCase : Dict = pipe_prior(
UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowerCAmelCase : List[str] = pipeline(
image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='''np''' , )
lowerCAmelCase : Dict = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
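# --- Editorial note (appended): the two-stage flow the slow test exercises.
# The prior pipeline maps the text prompt to image embeddings; the decoder
# pipeline consumes those embeddings together with the init image. Condensed,
# with the same model ids and kwargs as above (needs a GPU plus pretrained
# weights, so shown as commented pseudocode):
#
#   image_embeds, negative_embeds = pipe_prior(prompt, generator=generator,
#                                              num_inference_steps=5,
#                                              negative_prompt="").to_tuple()
#   result = pipeline(image=init_image, image_embeds=image_embeds,
#                     negative_image_embeds=negative_embeds, strength=0.2,
#                     num_inference_steps=100, output_type="np")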
| 60 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
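# Example (illustrative): split_text("one two three four five", n=3) returns
# ["one two three", "four five"] -- passages of at most n space-separated words.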
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ):
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
# And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 345 | 0 |
"""simple docstring"""
def __a ( __lowerCamelCase = 3, __lowerCamelCase = 7, __lowerCamelCase = 100_0000 ):
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[Any] = 1
for current_denominator in range(1, limit + 1 ):
UpperCAmelCase_ : Dict = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
UpperCAmelCase_ : List[Any] = current_numerator
UpperCAmelCase_ : Optional[int] = current_denominator
return max_numerator
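# Worked example: listing reduced proper fractions with denominators up to 8, the
# fraction immediately to the left of 3/7 is 2/5, so solution(3, 7, 8) == 2. For each
# denominator d the best candidate numerator is d * 3 // 7, decremented when d is a
# multiple of 7 so that 3/7 itself is never chosen; candidates are compared by
# cross-multiplication to avoid floating point error.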
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 61 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
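    # Gradient checkpointing trades compute for memory: activations are recomputed in the
    # backward pass instead of being stored, so it should not change the math -- losses and
    # per-parameter gradients must match the non-checkpointed run up to numerical tolerance,
    # which is exactly what the assertions above verify.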
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fpaa=False):
        # `fpaa` is this file's flag for loading the image in float16
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fpaa=False):
        revision = "fp16" if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fpaa=True)
        image = self.get_sd_image(seed, fpaa=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fpaa=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fpaa=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 345 | 0 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        # node_position[vertex] -> index of that vertex's entry in the heap
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the entry at `start` down until the min-heap property is restored
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Sift a decreased key at `index` up towards the root
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
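# Minimal usage sketch (illustrative): the heap stores vertex distances alongside a
# `positions` list mapping heap slots to vertices, so `delete_minimum` can return the
# vertex with the smallest tentative distance:
#   h = Heap(); h.node_position = [0, 1, 2]
#   dist, pos = [5, 1, 3], [0, 1, 2]
#   h.heapify(dist, pos)                    # smallest distance bubbles to the root
#   nearest = h.delete_minimum(dist, pos)   # -> vertex 1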
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum distance from each vertex not yet in the tree to its nearest
    # neighbor inside the partial tree built so far
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_A = int(input('Enter number of edges: ').strip())
_A = defaultdict(list)
for _ in range(edges_number):
_A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
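# Example session (illustrative): for the triangle graph below, the tree grows from
# vertex 0 and picks the two cheapest edges.
#   Enter number of edges: 3
#   0 1 1
#   1 2 2
#   0 2 3
# prints [(0, 1), (1, 2)]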
| 62 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so write the updated schema through __dict__
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 345 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

lowerCAmelCase_ = None  # retained for compatibility; the fallback above is what matters

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

SPIECE_UNDERLINE = '▁'

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
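# Special-token layout produced by build_inputs_with_special_tokens above -- XLNet puts
# the classifier token at the end rather than the start:
#   single sequence:    X <sep> <cls>
#   pair of sequences:  A <sep> B <sep> <cls>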
| 63 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
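# Export sketch (assumptions: a LayoutLMv3 processor/model pair and the `transformers.onnx`
# export helper; names and paths below are illustrative, not part of this module):
#   from pathlib import Path
#   from transformers import AutoModel, AutoProcessor
#   from transformers.onnx import export
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#   model = AutoModel.from_pretrained("microsoft/layoutlmv3-base")
#   onnx_config = LayoutLMv3OnnxConfig(model.config)
#   export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))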
| 345 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
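# Example (illustrative): with pad_token_id=0,
#   trim_batch(torch.tensor([[5, 6, 0], [7, 0, 0]]), 0)
# keeps only columns containing at least one non-pad token -> tensor([[5, 6], [7, 0]]).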
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
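# Worked example: f1_score("the cat sat", "cat sat down") normalizes both strings
# (lowercase, strip punctuation and articles) to the token lists ["cat", "sat"] and
# ["cat", "sat", "down"]; precision = 2/2, recall = 2/3, so F1 = 0.8.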
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 64 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
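# Note: the returned linear layer shares its weight tensor with the embedding, which is how
# the converted model ties its LM head to the input embeddings (see the `model.lm_head`
# assignment in the conversion function below).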
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 345 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A ( UpperCAmelCase_ ):
def __init__(self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Dict=1_3 , __UpperCAmelCase : str=7 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=True , __UpperCAmelCase : Dict=9_9 , __UpperCAmelCase : List[Any]=3_2 , __UpperCAmelCase : str=5 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : int=6_4 , __UpperCAmelCase : int="gelu" , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : Optional[Any]=5_1_2 , __UpperCAmelCase : Optional[Any]=1_6 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : List[Any]=0.02 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Union[str, Any]=4 , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[Any]=2 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Dict=1 , ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = num_choices
UpperCAmelCase__ = scope
UpperCAmelCase__ = q_groups
UpperCAmelCase__ = k_groups
UpperCAmelCase__ = v_groups
UpperCAmelCase__ = post_attention_groups
UpperCAmelCase__ = intermediate_groups
UpperCAmelCase__ = output_groups
def lowercase_ (self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowercase_ (self : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] ) -> int:
"""simple docstring"""
UpperCAmelCase__ = SqueezeBertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ (self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = SqueezeBertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ (self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase__ = SqueezeBertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ (self : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = SqueezeBertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ (self : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = SqueezeBertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ (self : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = SqueezeBertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ (self : Dict ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = config_and_inputs
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Optional[int] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase : Any = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Any = False
def lowercase_ (self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = SqueezeBertModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , dim=3_7 )
def lowercase_ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*__UpperCAmelCase )
def lowercase_ (self : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*__UpperCAmelCase )
def lowercase_ (self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*__UpperCAmelCase )
def lowercase_ (self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__UpperCAmelCase )
def lowercase_ (self : Any ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*__UpperCAmelCase )
def lowercase_ (self : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__UpperCAmelCase )
@slow
def lowercase_ (self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = SqueezeBertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest ( unittest.TestCase ):
@slow
def lowercase_ (self : int ) -> int:
"""simple docstring"""
UpperCAmelCase__ = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
UpperCAmelCase__ = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
UpperCAmelCase__ = model(__UpperCAmelCase )[0]
UpperCAmelCase__ = torch.Size((1, 3) )
self.assertEqual(output.shape , __UpperCAmelCase )
UpperCAmelCase__ = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
| 65 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester :
'''simple docstring'''
def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Dict = embed_dim
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : str = depths
UpperCAmelCase_ : int = num_heads
UpperCAmelCase_ : List[Any] = window_size
UpperCAmelCase_ : Union[str, Any] = mlp_ratio
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = drop_path_rate
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : List[Any] = use_absolute_embeddings
UpperCAmelCase_ : List[Any] = patch_norm
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = encoder_stride
UpperCAmelCase_ : Optional[int] = out_features
UpperCAmelCase_ : Optional[int] = out_indices
def A__ ( self: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Tuple:
return FocalNetConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
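        # With the defaults above (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]),
        # the patch grid has (32 // 2) ** 2 = 256 tokens; two downsampling stages shrink that to
        # 256 // 4 ** 2 = 16 tokens, each of width 16 * 2 ** 2 = 64.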
UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
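        # With out_features=["stage1", "stage2"], two feature maps come back; their channel
        # counts are the first two entries of hidden_sizes ([32, 64] here), and the first
        # (stage1) map is spatially 8x8.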
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A__ : Optional[Any] = False
A__ : Any = False
A__ : List[str] = False
A__ : Any = False
A__ : Any = False
def A__ ( self: List[str] ) -> Tuple:
UpperCAmelCase_ : Dict = FocalNetModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ )
def A__ ( self: List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: List[str] ) -> Union[str, Any]:
return
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
pass
def A__ ( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# FocalNet has a different seq_length
UpperCAmelCase_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : List[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
@slow
def A__ ( self: Optional[int] ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class FocalNetModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Optional[int] ) -> str:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
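        # ImageNet-1k class id 281 is "tabby, tabby cat", matching the COCO cats image above.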
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,281 )
@require_torch
class FocalNetBackboneTest ( BackboneTesterMixin , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A__ : int = FocalNetConfig
A__ : List[str] = False
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : str = FocalNetModelTester(self )
| 345 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"vocab_file": "spiece.model"}
__a = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
__a = {
"albert-base-v1": 5_12,
"albert-large-v1": 5_12,
"albert-xlarge-v1": 5_12,
"albert-xxlarge-v1": 5_12,
"albert-base-v2": 5_12,
"albert-large-v2": 5_12,
"albert-xlarge-v2": 5_12,
"albert-xxlarge-v2": 5_12,
}
__a = "▁"
class lowerCamelCase ( PreTrainedTokenizer ):
'''simple docstring'''
_A : List[Any] = VOCAB_FILES_NAMES
_A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self: List[Any] , snake_case: Union[str, Any] , snake_case: Tuple=True , snake_case: Tuple=True , snake_case: List[str]=False , snake_case: str="[CLS]" , snake_case: Union[str, Any]="[SEP]" , snake_case: Union[str, Any]="<unk>" , snake_case: int="[SEP]" , snake_case: Optional[Any]="<pad>" , snake_case: Dict="[CLS]" , snake_case: Any="[MASK]" , snake_case: Optional[Dict[str, Any]] = None , **snake_case: int , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
snake_case_ :int = (
AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case , normalized=snake_case )
if isinstance(snake_case , snake_case )
else mask_token
)
snake_case_ :List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
snake_case_ :Any = do_lower_case
snake_case_ :Optional[int] = remove_space
snake_case_ :Optional[Any] = keep_accents
snake_case_ :List[str] = vocab_file
snake_case_ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@property
def lowerCAmelCase_ ( self: int ) -> str:
return len(self.sp_model )
def lowerCAmelCase_ ( self: Tuple ) -> Optional[Any]:
snake_case_ :Optional[int] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: str ) -> List[str]:
snake_case_ :List[str] = self.__dict__.copy()
snake_case_ :Optional[Any] = None
return state
def __setstate__( self: Optional[int] , snake_case: Union[str, Any] ) -> Dict:
snake_case_ :List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ :str = {}
snake_case_ :str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self: Tuple , snake_case: List[str] ) -> List[str]:
if self.remove_space:
snake_case_ :str = """ """.join(inputs.strip().split() )
else:
snake_case_ :List[str] = inputs
snake_case_ :Union[str, Any] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
snake_case_ :Optional[Any] = unicodedata.normalize("""NFKD""" , snake_case )
snake_case_ :Tuple = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] )
if self.do_lower_case:
snake_case_ :Optional[Any] = outputs.lower()
return outputs
def lowerCAmelCase_ ( self: List[str] , snake_case: str ) -> List[str]:
snake_case_ :Tuple = self.preprocess_text(snake_case )
snake_case_ :str = self.sp_model.encode(snake_case , out_type=snake_case )
snake_case_ :List[str] = []
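        # SentencePiece can glue a trailing comma onto a digit (e.g. "9," as one piece);
        # the branch below re-encodes such pieces so the digit and the "," stay separate tokens.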
for piece in pieces:
if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
snake_case_ :Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ :List[Any] = cur_pieces[1:]
else:
snake_case_ :Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case )
else:
new_pieces.append(snake_case )
return new_pieces
def lowerCAmelCase_ ( self: Optional[int] , snake_case: Union[str, Any] ) -> Any:
return self.sp_model.PieceToId(snake_case )
def lowerCAmelCase_ ( self: Tuple , snake_case: Dict ) -> str:
return self.sp_model.IdToPiece(snake_case )
def lowerCAmelCase_ ( self: List[str] , snake_case: Dict ) -> Optional[int]:
snake_case_ :str = []
snake_case_ :Tuple = """"""
snake_case_ :Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case ) + token
snake_case_ :Dict = True
snake_case_ :Dict = []
else:
current_sub_tokens.append(snake_case )
snake_case_ :Dict = False
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]:
snake_case_ :int = [self.sep_token_id]
snake_case_ :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[int] , snake_case: Optional[List[int]] = None , snake_case: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1]
def lowerCAmelCase_ ( self: Any , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]:
snake_case_ :Tuple = [self.sep_token_id]
snake_case_ :Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self: Dict , snake_case: str , snake_case: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ :Any = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , """wb""" ) as fi:
snake_case_ :Dict = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
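# A minimal usage sketch of the intended API (assuming this class is exported as
# AlbertTokenizer and that a local SentencePiece model file exists; both are assumptions,
# not part of this module):
#
#     tokenizer = AlbertTokenizer("spiece.model", do_lower_case=True)
#     tokens = tokenizer.tokenize("Hello, world!")            # SentencePiece pieces
#     ids = tokenizer.build_inputs_with_special_tokens(
#         tokenizer.convert_tokens_to_ids(tokens))            # [CLS] ... [SEP]
#     tokenizer.save_vocabulary(".")                          # copies spiece.model alongside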
| 66 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester :
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : List[str] = num_heads
UpperCAmelCase_ : int = window_size
UpperCAmelCase_ : List[str] = mlp_ratio
UpperCAmelCase_ : Tuple = qkv_bias
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : int = use_absolute_embeddings
UpperCAmelCase_ : Any = patch_norm
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[str] = encoder_stride
def A__ ( self: Any ) -> int:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Union[str, Any]:
return SwinvaConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str:
UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
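        # With image_size=32, patch_size=2, embed_dim=16 and depths=[1, 2, 1], the checks below
        # expect a 256-token patch grid reduced by two merge stages to 256 // 4 ** 2 = 16 tokens,
        # each of width 16 * 2 ** 2 = 64.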
UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: str ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SwinvaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A__ : Tuple = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
A__ : Optional[Any] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
A__ : List[Any] = False
A__ : Tuple = False
A__ : int = False
A__ : Union[str, Any] = False
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = SwinvaModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 )
def A__ ( self: Optional[int] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: Any ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A__ ( self: Tuple ) -> List[str]:
pass
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = True
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[Any] = outputs.attentions
UpperCAmelCase_ : List[str] = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Optional[Any] = config.window_size**2
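            # Window attention is local: each attention map has shape
            # (num_heads, window_size**2, window_size**2), i.e. (2, 4, 4) here with window_size=2.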
UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ )
# Check attention is always last and order is fine
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
if hasattr(self.model_tester ,"""num_hidden_states_types""" ):
UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase_ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : List[str] = outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# Swinv2 has a different seq_length
UpperCAmelCase_ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape
UpperCAmelCase_ : Optional[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = 3
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : List[str] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def A__ ( self: str ) -> Tuple:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Any ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class SwinvaModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Dict ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
lowerCamelCase_ )
UpperCAmelCase_ : Any = self.default_image_processor
UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
| 345 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 67 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase_ = {
    '''vocab_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
    },
}
UpperCamelCase_ = {
'''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    '''Map every byte value (0-255) to a printable unicode character, so byte-level
    BPE never has to operate on whitespace or control characters directly.'''
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            # bytes outside the printable ranges are remapped to code points above 255
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
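# For example, printable ASCII maps to itself while "invisible" bytes are shifted above 255:
# bytes_to_unicode()[ord("A")] == "A" and bytes_to_unicode()[ord(" ")] == "Ġ" (U+0120),
# the familiar GPT-2 space marker.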
def get_pairs(word):
    '''Return the set of adjacent symbol pairs in a word, where the word is represented
    as a tuple of symbols (variable-length strings).'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
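# For example, get_pairs(("l", "o", "w", "er")) == {("l", "o"), ("o", "w"), ("w", "er")}.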
class _snake_case ( PreTrainedTokenizer ):
'''simple docstring'''
A__ : str = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any:
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : Any = errors # how to handle errors in decoding
UpperCAmelCase_ : int = bytes_to_unicode()
UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self: List[str] ) -> List[str]:
return len(self.encoder )
def A__ ( self: Any ) -> Union[str, Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]:
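        # Classic BPE: repeatedly merge the adjacent pair with the lowest merge rank until no
        # ranked pair remains, then cache the result. E.g. with ranks {("l", "o"): 0,
        # ("lo", "w"): 1}, "low" goes ("l", "o", "w") -> ("lo", "w") -> ("low",).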
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : List[str] = 0
while i < len(lowerCamelCase_ ):
try:
UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ : Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ )
UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = word
return word
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]:
UpperCAmelCase_ : str = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
UpperCAmelCase_ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]:
return self.decoder.get(lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]:
UpperCAmelCase_ : str = """""".join(lowerCamelCase_ )
UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : List[Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : List[str] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCAmelCase_ : str = 0
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase_ : Tuple = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase_ : Dict = """ """ + text
return (text, kwargs)
def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[bool] = None ,) -> dict:
UpperCAmelCase_ : Optional[int] = super()._pad(
encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ : str = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
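# A minimal sketch of the global-attention padding behaviour above (assuming this class is
# exported as LEDTokenizer; the vocab/merges file names are hypothetical):
#
#     tok = LEDTokenizer("vocab.json", "merges.txt")
#     enc = tok("long document")
#     enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#     padded = tok.pad(enc, padding="max_length", max_length=32)
#     # padded positions of global_attention_mask are filled with -1 ("local attention"),
#     # never 0, so "no global attention" and "padding" stay distinguishable.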
| 345 | 0 |
def solution( n: int = 1_0_0 ) -> int:
    '''Return the difference between the square of the sum and the sum of the squares
    of the first n natural numbers (Project Euler problem 6).'''
    square_of_sum = (n * (n + 1) // 2) ** 2  # closed form for (1 + 2 + ... + n) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6  # closed form for 1**2 + 2**2 + ... + n**2
    return square_of_sum - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 68 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self: Union[str, Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self: List[str] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Optional[Any] = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
    def dummy_cond_unet( self: List[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
    def dummy_vae( self: str ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
    def dummy_text_encoder( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
@property
def A__ ( self: Tuple ) -> Tuple:
def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ):
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = torch.ones([0] )
def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int:
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# assemble the pipeline components (the scheduler here is DDIM, so no PRK steps are involved)
UpperCAmelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.dummy_cond_unet
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: str ) -> Dict:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Tuple = self.dummy_cond_unet
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCAmelCase_ : Optional[Any] = unet.half()
UpperCAmelCase_ : Optional[int] = vae.half()
UpperCAmelCase_ : int = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCAmelCase_ : Optional[int] = 4003660346
UpperCAmelCase_ : int = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# with safety guidance (strong configuration)
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCAmelCase_ : List[Any] = 2734971755
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCAmelCase_ : Optional[Any] = 1044355234
UpperCAmelCase_ : List[str] = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 345 | 0 |
"""simple docstring"""
import math
import sys
def UpperCAmelCase ( UpperCAmelCase ) -> int:
if number != int(UpperCAmelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
snake_case_ = [-1] * (number + 1)  # answers[i] = fewest perfect squares summing to i
snake_case_ = 0  # base case: zero squares sum to 0
for i in range(1 , number + 1 ):
snake_case_ = sys.maxsize
snake_case_ = int(math.sqrt(i ) )  # largest j with j**2 <= i (taking the sqrt of the full input here would index below 0)
for j in range(1 , root + 1 ):
snake_case_ = 1 + answers[i - (j**2)]
snake_case_ = min(UpperCAmelCase , UpperCAmelCase )
snake_case_ = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
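# A hedged brute-force cross-check of the DP above (by Lagrange's four-square
# theorem the answer is always at most 4): fewest perfect squares summing to n.
def fewest_squares_naive(n: int) -> int:
    if n == 0:
        return 0
    best = n  # n copies of 1**2 always work
    j = 1
    while j * j <= n:
        best = min(best, 1 + fewest_squares_naive(n - j * j))
        j += 1
    return best

assert fewest_squares_naive(12) == 3  # 4 + 4 + 4
assert fewest_squares_naive(13) == 2  # 9 + 4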
| 69 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _snake_case :
'''simple docstring'''
def __init__( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=13 ,lowerCamelCase_: int=7 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: int=99 ,lowerCamelCase_: List[str]=64 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Union[str, Any]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: str=None ,) -> List[str]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[str] = embedding_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : List[str] = scope
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self: Any ) -> Dict:
return MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,)
def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int:
UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int:
UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def A__ ( self: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> str:
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : Union[str, Any] = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Any:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_choices
UpperCAmelCase_ : Tuple = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ : str = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs  # config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
UpperCAmelCase_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ : List[str] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : List[str] = True
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: int=False ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
UpperCAmelCase_ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
return inputs_dict
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[str] = MobileBertModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=37 )
def A__ ( self: Optional[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _a : Union[str, Any] ):
'''simple docstring'''
return torch.tensor(
_a , dtype=torch.long , device=torch_device , )
UpperCamelCase_ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self: List[Any] ) -> str:
UpperCAmelCase_ : Any = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : str = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )[0]
UpperCAmelCase_ : Union[str, Any] = torch.Size((1, 9, 512) )
self.assertEqual(output.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = torch.tensor(
[
[
[-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
[-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
[2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
]
] ,device=lowerCamelCase_ ,)
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
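# A minimal sketch (hypothetical helper, not part of the test suite) of the
# ratio-based comparison used above: with values spanning ~10e0 to 10e8 an
# absolute tolerance is meaningless, so we bound expected / actual around 1.
def close_in_ratio(expected, actual, tolerance=1e-3):
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance)) and bool(torch.all(ratio <= 1 + tolerance))

# e.g. close_in_ratio(torch.tensor([1e8]), torch.tensor([1e8 + 50.0])) -> True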
| 345 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : int ={
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] =['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] =[
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] =[
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
A__ : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
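# A hedged, simplified sketch of the lazy-import pattern used above (assumed
# semantics, not the real `_LazyModule`): a submodule is only imported the first
# time one of its attributes is accessed, keeping the top-level import fast.
import importlib

class _TinyLazyModule:
    def __init__(self, package, attr_to_module):
        self._package = package
        self._attr_to_module = attr_to_module  # e.g. {"FunnelConfig": ".configuration_funnel"}

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr], self._package)
        return getattr(module, attr)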
| 70 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: str ) -> int:
UpperCAmelCase_ : List[Any] = """ylacombe/bark-small"""
UpperCAmelCase_ : Tuple = tempfile.mkdtemp()
UpperCAmelCase_ : Union[str, Any] = """en_speaker_1"""
UpperCAmelCase_ : Optional[Any] = """This is a test string"""
UpperCAmelCase_ : int = """speaker_embeddings_path.json"""
UpperCAmelCase_ : Any = """speaker_embeddings"""
def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ )
def A__ ( self: str ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A__ ( self: List[Any] ) -> int:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def A__ ( self: List[Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
UpperCAmelCase_ : Optional[int] = 35  # seq_len
UpperCAmelCase_ : Optional[int] = 2  # nb_codebooks_coarse
UpperCAmelCase_ : Dict = 8  # nb_codebooks_total
UpperCAmelCase_ : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : str = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,"""file.npz""" )
np.savez(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : int = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Union[str, Any] = processor(text=self.input_string ,voice_preset=self.voice_preset )
def A__ ( self: Dict ) -> Tuple:
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Dict = BarkProcessor(tokenizer=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string )
UpperCAmelCase_ : str = tokenizer(
self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
| 345 | 0 |
import sys
A_ :List[Any] = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def A ( a_ = A_ ) -> int:
__UpperCamelCase : List[str] =-sys.maxsize - 1
for i in range(len(a_ ) - 12 ):
__UpperCamelCase : Optional[int] =1
for j in range(13 ):
product *= int(a_[i + j] )
if product > largest_product:
__UpperCamelCase : List[str] =product
return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 71 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : str = -1
UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : Optional[int] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Dict ) -> Optional[Any]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] )
UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
UpperCAmelCase_ : int = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :]
UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : List[str] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: str ) -> str:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Any = -1
UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = -1
UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 )
UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(Empty ):  # the streamer's internal queue raises queue.Empty on timeout
UpperCAmelCase_ : Union[str, Any] = """"""
for new_text in streamer:
streamer_text += new_text
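# A hedged sketch of the canonical consumer pattern exercised above (hypothetical
# wrapper name): generation runs in a background thread while the caller drains
# the streamer, which raises queue.Empty if `timeout` elapses between tokens.
def stream_generate(model, tokenizer, input_ids, max_new_tokens=10):
    streamer = TextIteratorStreamer(tokenizer)
    generation_kwargs = {"input_ids": input_ids, "max_new_tokens": max_new_tokens, "streamer": streamer}
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    return "".join(new_text for new_text in streamer)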
| 345 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
@property
def A__ ( self: Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,)
return model
@property
def A__ ( self: Tuple ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
def A__ ( self: str ) -> Optional[Any]:
UpperCAmelCase_ : str = self.dummy_uncond_unet
UpperCAmelCase_ : List[Any] = DDIMScheduler()
UpperCAmelCase_ : List[Any] = self.dummy_vq_model
UpperCAmelCase_ : Optional[int] = LDMPipeline(unet=lowerCamelCase_ ,vqvae=lowerCamelCase_ ,scheduler=lowerCamelCase_ )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = torch.manual_seed(0 )
UpperCAmelCase_ : int = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ).images
UpperCAmelCase_ : List[str] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=lowerCamelCase_ )[0]
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : str = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
UpperCAmelCase_ : Tuple = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ldm(generator=lowerCamelCase_ ,num_inference_steps=5 ,output_type="""numpy""" ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_ : int = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
UpperCAmelCase_ : Union[str, Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 345 | 0 |
from math import sqrt
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1_0_0_0_1 ) -> int:
__lowerCamelCase : List[Any] = 0
__lowerCamelCase : str = 1
while count != nth and number < 3:
number += 1
if is_prime(lowerCamelCase__ ):
count += 1
while count != nth:
number += 2
if is_prime(lowerCamelCase__ ):
count += 1
return number
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 |
def lowerCamelCase_ ( _a : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = [0] * len(_a )
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Dict = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_a ) ):
if indegree[i] == 0:
queue.append(_a )
while queue:
UpperCAmelCase_ : List[str] = queue.pop(0 )
cnt += 1
topo.append(_a )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_a )
if cnt != len(_a ):
print("""Cycle exists""" )
else:
print(_a )
# Adjacency List of Graph
UpperCamelCase_ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
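# Hedged second example, following the file's own naming convention: a graph with
# the cycle 1 -> 2 -> 3 -> 1 never drains Kahn's queue, so `cnt` stays below the
# vertex count and the cycle is reported.
UpperCamelCase_ = {0: [1], 1: [2], 2: [3], 3: [1]}
topological_sort(graph)  # prints "Cycle exists"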
| 345 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''speech_to_text'''
_lowerCamelCase: Union[str, Any] = ['''past_key_values''']
_lowerCamelCase: Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Union[str, Any] ,A_ : Tuple=1_0000 ,A_ : Union[str, Any]=12 ,A_ : int=2048 ,A_ : List[Any]=4 ,A_ : int=6 ,A_ : List[Any]=2048 ,A_ : List[str]=4 ,A_ : Optional[Any]=0.0 ,A_ : str=0.0 ,A_ : Dict=True ,A_ : List[Any]=True ,A_ : int="relu" ,A_ : List[str]=256 ,A_ : Dict=0.1 ,A_ : List[Any]=0.0 ,A_ : List[Any]=0.0 ,A_ : Union[str, Any]=0.02 ,A_ : str=2 ,A_ : Union[str, Any]=True ,A_ : List[str]=1 ,A_ : Union[str, Any]=0 ,A_ : int=2 ,A_ : int=6000 ,A_ : Dict=1024 ,A_ : str=2 ,A_ : Tuple=(5, 5) ,A_ : List[str]=1024 ,A_ : str=80 ,A_ : int=1 ,**A_ : Union[str, Any] ,) -> Optional[Any]:
A = vocab_size
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = max_source_positions
A = max_target_positions
A = num_conv_layers
A = list(A_ )
A = conv_channels
A = input_feat_per_channel
A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '
F'`config.num_conv_layers = {self.num_conv_layers}`.' )
super().__init__(
pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ,is_encoder_decoder=A_ ,decoder_start_token_id=A_ ,**A_ ,)
| 74 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = "swinv2"
A__ : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: List[str] ,lowerCamelCase_: List[str]=224 ,lowerCamelCase_: List[str]=4 ,lowerCamelCase_: List[Any]=3 ,lowerCamelCase_: Optional[Any]=96 ,lowerCamelCase_: Any=[2, 2, 6, 2] ,lowerCamelCase_: Dict=[3, 6, 12, 24] ,lowerCamelCase_: str=7 ,lowerCamelCase_: Optional[Any]=4.0 ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: str=False ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: Union[str, Any]=1e-5 ,lowerCamelCase_: str=32 ,**lowerCamelCase_: List[str] ,) -> Tuple:
super().__init__(**lowerCamelCase_ )
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Tuple = patch_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : List[Any] = embed_dim
UpperCAmelCase_ : Dict = depths
UpperCAmelCase_ : Dict = len(lowerCamelCase_ )
UpperCAmelCase_ : str = num_heads
UpperCAmelCase_ : Tuple = window_size
UpperCAmelCase_ : int = mlp_ratio
UpperCAmelCase_ : str = qkv_bias
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : int = drop_path_rate
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : List[str] = use_absolute_embeddings
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ : List[str] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
UpperCAmelCase_ : Any = (0, 0, 0, 0)
| 345 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __UpperCamelCase :
lowercase : List[str]
lowercase : Optional[str] =None
# Automatically constructed
lowercase : ClassVar[str] ="dict"
lowercase : ClassVar[Any] =None
lowercase : str =field(default='Translation' , init=lowerCamelCase__ , repr=lowerCamelCase__ )
def __call__( self ):
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase__ ( self ):
"""simple docstring"""
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __UpperCamelCase :
lowercase : Optional[List] =None
lowercase : Optional[int] =None
lowercase : Optional[str] =None
# Automatically constructed
lowercase : ClassVar[str] ="dict"
lowercase : ClassVar[Any] =None
lowercase : str =field(default='TranslationVariableLanguages' , init=lowerCamelCase__ , repr=lowerCamelCase__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ =len(self.languages ) if self.languages else None
def __call__( self ):
"""simple docstring"""
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =set(self.languages )
if self.languages and set(lowerCAmelCase ) - lang_set:
raise ValueError(
f'''Some languages in example ({', '.join(sorted(set(lowerCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(lowerCAmelCase )}).''' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ =[]
for lang, text in translation_dict.items():
if isinstance(lowerCAmelCase, lowerCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_, lowerCamelCase_ =zip(*sorted(lowerCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase__ ( self ):
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
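# A hedged usage sketch (method name assumed from the `datasets` library, where
# this encoder is `TranslationVariableLanguages.encode_example`): duplicate
# translations for one language are split into parallel, language-sorted pairs.
# feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
# feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
# -> {"language": ("de", "en", "fr", "fr"),
#     "translation": ("die katze", "the cat", "la chatte", "le chat")}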
| 75 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    '''simple docstring'''

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    '''simple docstring'''

    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    '''simple docstring'''

    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
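
For readers following the assertions above, here is a deliberately simplified sketch of the trie those tests exercise. It is not the transformers implementation (which also handles overlapping partial matches and the offset hardening that cut_text tests); it only reproduces add() and a greedy longest-match split() sufficient for the simple cases shown:

class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word):
        # each character is a nested dict key; "" marks a complete token
        ref = self.data
        for char in word:
            ref = ref.setdefault(char, {})
        ref[""] = 1

    def split(self, text):
        # greedy longest-match scan from left to right
        parts, start, i = [], 0, 0
        while i < len(text):
            ref, j, end = self.data, i, None
            while j < len(text) and text[j] in ref:
                ref = ref[text[j]]
                j += 1
                if "" in ref:
                    end = j  # remember the longest complete token so far
            if end is not None:
                if start < i:
                    parts.append(text[start:i])
                parts.append(text[i:end])
                start = i = end
            else:
                i += 1
        if start < len(text):
            parts.append(text[start:])
        return parts
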
| 345 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 76 |
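
A hedged companion sketch for the Prim's-algorithm test in the record above: the same adjacency-building pattern applied to a 3-node triangle. The edge-count check assumes prisms_algorithm returns one (node, node) pair per tree edge, as the membership checks in the test imply:

tiny = defaultdict(list)
for node_a, node_b, weight in [[0, 1, 1], [1, 2, 2], [0, 2, 3]]:
    tiny[node_a].append([node_b, weight])
    tiny[node_b].append([node_a, weight])
# a spanning tree over 3 nodes always has exactly 2 edges
assert len(mst(tiny)) == 2
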
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
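
The dummy classes above all funnel into one guard. Below is a minimal illustrative stand-in for that pattern, not the real diffusers.utils.requires_backends (which also resolves actual backend availability and prints install hints); the availability set is an explicit parameter here purely for demonstration:

def requires_backends_sketch(obj, backends, available=()):
    # raise eagerly if any required backend is not in the available set
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [backend for backend in backends if backend not in available]
    if missing:
        raise ImportError(f"{name} requires the {missing} backends, which are not installed.")
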
| 345 | 0 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask')
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name')

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info(' %s = %s', key, str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
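
A hedged sketch of driving the script above programmatically; the flag names follow the dataclass fields defined earlier in this file, and every path below is a placeholder:

import sys

sys.argv = [
    "run_language_modeling.py",
    "--model_name_or_path", "gpt2",
    "--train_data_file", "/path/to/train.txt",
    "--do_train",
    "--output_dir", "/tmp/lm_output",
]
# main() would now parse this argv via HfArgumentParser and start training
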
| 77 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    '''simple docstring'''
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
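
For contrast with the loop above (which swaps two uniformly random positions per step and is not a uniform shuffle), here is a sketch of the textbook Fisher-Yates variant, using the same random module and also operating in place:

def fisher_yates_shuffle_classic(data: list) -> list:
    # walk from the end, swapping each slot with a uniformly chosen earlier one
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data
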
| 345 | 0 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
snake_case_ = """base_with_context"""
def load_notes_encoder(weights, model):
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase_ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight['attention']
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights, model):
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase_ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight['attention']
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights, model):
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase_ )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight['self_attention']
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)

    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]

    gin_file = os.path.join(args.checkpoint_path, '..', 'config.gin')
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2', variance_type='fixed_large')

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu',
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length['targets_context'], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu',
    )

    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length['targets_context'], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'], continuous_encoder)
    decoder = load_decoder(ta_checkpoint['target']['decoder'], decoder)

    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder')

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
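
A hedged aside on the conversion pattern used throughout the script above: Flax Dense kernels are stored as (in_features, out_features), while torch.nn.Linear keeps (out_features, in_features), hence the ubiquitous .T on each kernel. The shapes below are made up for illustration:

import numpy as np

flax_kernel = np.zeros((512, 768), dtype=np.float32)  # (in, out) on the JAX side
pt_weight = torch.FloatTensor(flax_kernel.T)          # (out, in) for torch.nn.Linear
assert tuple(pt_weight.shape) == (768, 512)
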
| 78 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
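
A hedged init/apply sketch for the plain down block above. The shapes are arbitrary, the NHWC layout is an assumption carried over from the diffusers Flax convention, and the time-embedding width is likewise illustrative (the resnet's Dense projection infers it at init):

import jax

rng = jax.random.PRNGKey(0)
block = FlaxDownBlock2D(in_channels=32, out_channels=32)
sample = jnp.zeros((1, 64, 64, 32), dtype=jnp.float32)  # NHWC
temb = jnp.zeros((1, 128), dtype=jnp.float32)
params = block.init(rng, sample, temb)
hidden, skip_states = block.apply(params, sample, temb)
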
| 345 | 0 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
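
A hedged usage sketch for the (long-deprecated) GLUE helpers re-exported above; the task name and data directory are placeholders:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
glue_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/MRPC", max_seq_length=128)
train_dataset = GlueDataset(glue_args, tokenizer=tokenizer)
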
| 79 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
'''simple docstring'''
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        # conv1_get: [kernel_size, step, number_of_kernels] for the conv layer
        # size_p1: pooling window size; bp_num1..3: widths of the three BP layers
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# modify model parameter
UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : int = model_dic.get("""wkj""" )
UpperCAmelCase_ : int = model_dic.get("""vji""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple:
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any:
# convolution process
UpperCAmelCase_ : Optional[Any] = convs[0]
UpperCAmelCase_ : int = convs[1]
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Dict = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
# calculate the feature map of every single kernel, and saved as list of matrix
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
# expanding the data slice to One dimenssion
UpperCAmelCase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
UpperCAmelCase_ : Any = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[int] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
# expanding three dimension data to one dimension list
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Tuple = np.shape(data[i] )
UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
# expanding matrix to one dimension list
UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = pd_pool[
i_pool
]
UpperCAmelCase_ : List[str] = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]:
# model traning
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = data_bp_input
UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[str] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[Any] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the absolute error of each single image
UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : int = rp + 1
UpperCAmelCase_ : Any = error_count / patterns
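            # note: despite the name, this "mse" is the summed absolute output
            # error averaged over the number of training patterns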
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
        # model prediction
UpperCAmelCase_ : Union[str, Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : str = data_bp_input
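            # inference forward pass; assuming self.vji and self.wkj are
            # np.matrix objects, the * operator below performs matrix
            # multiplication, matching the np.dot calls used during training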
UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
        # return the image data after the convolution and pooling steps so it can be inspected
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
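    # a minimal usage sketch (assumed, not from this file): the class would be
    # instantiated with its convolution / pooling / BP-layer sizes and rates,
    # then trained and queried roughly like this:
    # cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=9, bp_num2=20,
    #           bp_num3=1, rate_w=0.2, rate_t=0.2)
    # mse = cnn.train(patterns, datas_train, datas_teach, n_repeat=100,
    #                 error_accuracy=0.1, draw_e=True)
    # predictions = cnn.predict(datas_test)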
| 345 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a__ : Any = False
class lowercase_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
def __a ( self ):
UpperCamelCase__ = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
UpperCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = pipe(
image=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
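        # compare a small 3x3 slice of the generated image against reference
        # values; checking every pixel of the full image would be brittle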
UpperCamelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 80 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Optional[Any] = CTRLTokenizer
A__ : Optional[Any] = False
A__ : str = False
def A__ ( self: Optional[int] ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: int ,lowerCamelCase_: int ) -> str:
UpperCAmelCase_ : List[str] = """adapt react readapt apt"""
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
return input_text, output_text
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
| 345 | 0 |