| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments) -> None:
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that dynamically pads the received inputs and prepares the masked time indices needed for
    self-supervised pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer that decays the Gumbel softmax temperature after every update step.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()

| 8 |
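A hypothetical invocation sketch (editor-added): the flag names come from the argument dataclasses defined in the script above, but the script filename and the dataset/model identifiers are illustrative placeholders, not taken from the source.

    python run_wav2vec2_pretrain.py \
        --model_name_or_path="facebook/wav2vec2-base" \
        --dataset_name="librispeech_asr" \
        --dataset_config_name="clean" \
        --train_split_name="train.100" \
        --output_dir="./wav2vec2-pretrained" \
        --do_train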
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings

logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, replacing any `GenerationConfig` value by its dictionary representation so
        that the result can be JSON-serialized.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d

| 8 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->dict[str, float]:
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 702 |
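A minimal usage sketch (editor-added; the calls and expected outputs are illustrative, derived directly from the function as defined above):

    print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
    print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}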
"""simple docstring"""
def lowerCamelCase_ ( UpperCAmelCase_ ) ->float:
"""simple docstring"""
return 10 - x * x
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->float:
"""simple docstring"""
if equation(UpperCAmelCase_ ) * equation(UpperCAmelCase_ ) >= 0:
raise ValueError('''Wrong space!''' )
__UpperCAmelCase : Tuple = a
while (b - a) >= 0.01:
# Find middle point
__UpperCAmelCase : List[str] = (a + b) / 2
# Check if middle point is root
if equation(UpperCAmelCase_ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(UpperCAmelCase_ ) * equation(UpperCAmelCase_ ) < 0:
__UpperCAmelCase : Union[str, Any] = c
else:
__UpperCAmelCase : str = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 374 | 0 |
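A quick sanity check (editor-added; the tolerance is illustrative): the positive root of 10 - x**2 is sqrt(10) ≈ 3.162, and the loop stops once the bracketing interval is narrower than 0.01, so both calls above should land near 3.16:

    assert abs(bisection(0, 6) - 10 ** 0.5) < 0.02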
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def _A ( self : List[Any] ):
pass # TODO add if relevant
def _A ( self : str ):
pass # TODO add if relevant
def _A ( self : Optional[Any] ):
pass # TODO add if relevant
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(UpperCAmelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(UpperCAmelCase_ , "wb" ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , "rb" ) as handle:
SCREAMING_SNAKE_CASE : List[Any] = pickle.load(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : Dict ):
try:
SCREAMING_SNAKE_CASE : List[Any] = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : Dict ):
try:
SCREAMING_SNAKE_CASE : Optional[Any] = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Any = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _A ( self : List[str] ):
try:
SCREAMING_SNAKE_CASE : Tuple = MecabTokenizer(
do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : List[Any] = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(UpperCAmelCase_ , "wb" ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , "rb" ) as handle:
SCREAMING_SNAKE_CASE : List[str] = pickle.load(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_sudachi
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : str = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Dict = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Dict = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : int = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(UpperCAmelCase_ , "wb" ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , "rb" ) as handle:
SCREAMING_SNAKE_CASE : List[Any] = pickle.load(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_jumanpp
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : str = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : List[Any] = JumanppTokenizer(do_lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : str = JumanppTokenizer(normalize_text=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : List[str] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
SCREAMING_SNAKE_CASE : str = {}
for i, token in enumerate(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : str = i
SCREAMING_SNAKE_CASE : Optional[Any] = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Optional[Any] = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE : Any = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(UpperCAmelCase_ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
SCREAMING_SNAKE_CASE : List[str] = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(UpperCAmelCase_ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
SCREAMING_SNAKE_CASE : Dict = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def _A ( self : Any ):
pass # TODO add if relevant
def _A ( self : Optional[int] ):
pass # TODO add if relevant
def _A ( self : Dict ):
pass # TODO add if relevant
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
UpperCAmelCase_ , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : List[str] = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
SCREAMING_SNAKE_CASE : Optional[int] = {}
for i, token in enumerate(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : str = i
SCREAMING_SNAKE_CASE : List[str] = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Any = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
SCREAMING_SNAKE_CASE : str = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 62 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 509 | 0 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 720 |
'''simple docstring'''
from ...utils import OptionalDependencyNotAvailable, is_note_seq_available, is_torch_available, is_transformers_available

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
| 339 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 393 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """
    Class to represent the Diffie-Hellman key exchange protocol.
    """

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
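A minimal end-to-end sketch (editor-added; variable names are illustrative) showing that two parties using the class above derive the same shared secret, since g**(a*b) mod p is symmetric:

    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_secret = alice.generate_shared_key(bob.generate_public_key())
    bob_secret = bob.generate_shared_key(alice.generate_public_key())
    assert alice_secret == bob_secret  # both sides agree on the SHA-256 digest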
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the key by subtracting the offset from the original block number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights into our PoolFormer structure.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:str = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 520 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
raise ValueError("""The length of profit and weight must be same.""" )
if max_weight <= 0:
raise ValueError("""max_weight must greater than zero.""" )
if any(p < 0 for p in profit ):
raise ValueError("""Profit can not be negative.""" )
if any(w < 0 for w in weight ):
raise ValueError("""Weight can not be negative.""" )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
A : List[Any] = [p / w for p, w in zip(_lowerCAmelCase , _lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
A : Union[str, Any] = sorted(_lowerCAmelCase )
# declaring useful variables
A : str = len(_lowerCAmelCase )
A : Union[str, Any] = 0
A : Any = 0
A : Dict = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or
    # until i reaches length
    while limit <= max_weight and i < length:
        # pick the largest remaining profit/weight ratio
        A : str = sorted_profit_by_weight[length - i - 1]
        A : List[Any] = profit_by_weight.index(_lowerCAmelCase )
        A : Tuple = -1
        # check whether this item's weight fits within the remaining
        # capacity (max_weight - limit).
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Add the full profit for this item, since
            # weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # The item does not fit entirely, so take only the fraction that
            # fills the remaining capacity:
            # (max_weight - limit) / weight[index] of the item's profit.
            gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
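# Worked example (editor's sketch, not executed here): for the classic
# instance profit=[60, 100, 120], weight=[10, 20, 30], max_weight=50, the
# greedy takes the 10 kg and 20 kg items whole (60 + 100) and 20/30 of the
# 30 kg item ((20 / 30) * 120 == 80.0), returning the optimum 240.0.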
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
SCREAMING_SNAKE_CASE_:Union[str, Any] = [int(x) for x in input("""Input profits separated by spaces: """).split()]
SCREAMING_SNAKE_CASE_:Optional[int] = [int(x) for x in input("""Input weights separated by spaces: """).split()]
SCREAMING_SNAKE_CASE_:int = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 520 | 1 |
"""simple docstring"""
def _snake_case ( UpperCamelCase : Tuple ):
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Tuple = set({"""(""", """[""", """{"""} )
UpperCAmelCase : Any = set({""")""", """]""", """}"""} )
UpperCAmelCase : Tuple = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(UpperCAmelCase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(UpperCAmelCase__ ) == 0 or (len(UpperCAmelCase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(UpperCAmelCase__ ) == 0
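# Example behaviour (editor's sketch):
#   "([]{})" -> True   (every opener is closed in LIFO order)
#   "([)]"   -> False  (")" arrives while "[" is still on top of the stack)
#   "((("    -> False  (the stack is non-empty when the input ends)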
def _snake_case ( ):
UpperCAmelCase : Tuple = input("""Enter sequence of brackets: """ )
if is_balanced(UpperCAmelCase__ ):
print(UpperCAmelCase__ , """is balanced""" )
else:
print(UpperCAmelCase__ , """is not balanced""" )
if __name__ == "__main__":
main()
| 160 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__magic_name__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__magic_name__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__magic_name__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
__magic_name__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
__magic_name__ = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
__magic_name__ = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
__magic_name__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
__magic_name__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
__magic_name__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE ( a ):
"""simple docstring"""
a_ : Optional[int] =VOCAB_FILES_NAMES
a_ : Union[str, Any] =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Tuple =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class SCREAMING_SNAKE_CASE ( a ):
"""simple docstring"""
a_ : Optional[int] =VOCAB_FILES_NAMES
a_ : int =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a_ : Any =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Tuple =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__magic_name__ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
__magic_name__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__magic_name__ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(a )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __call__( self : str , _snake_case : Tuple , _snake_case : Optional[str] = None , _snake_case : Optional[str] = None , _snake_case : Union[bool, str] = False , _snake_case : Union[bool, str] = False , _snake_case : Optional[int] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Optional[bool] = None , **_snake_case : List[str] , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
elif titles is None or texts is None:
a__ = titles if texts is None else texts
return super().__call__(
_snake_case , _snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
a__ = titles if not isinstance(_snake_case , _snake_case ) else [titles]
a__ = texts if not isinstance(_snake_case , _snake_case ) else [texts]
a__ = len(_snake_case )
a__ = questions if not isinstance(_snake_case , _snake_case ) else [questions] * n_passages
if len(_snake_case ) != len(_snake_case ):
raise ValueError(
F'''There should be as many titles than texts but got {len(_snake_case )} titles and {len(_snake_case )} texts.''' )
a__ = super().__call__(_snake_case , _snake_case , padding=_snake_case , truncation=_snake_case )['input_ids']
a__ = super().__call__(_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case )['input_ids']
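        # Concatenate "[CLS] question [SEP] title [SEP]" with the passage text
        # ids, truncating the combined sequence to max_length when requested.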
a__ = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_snake_case , _snake_case )
]
}
if return_attention_mask is not False:
a__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
a__ = attention_mask
return self.pad(_snake_case , padding=_snake_case , max_length=_snake_case , return_tensors=_snake_case )
def _lowerCAmelCase ( self : List[str] , _snake_case : BatchEncoding , _snake_case : DPRReaderOutput , _snake_case : int = 16 , _snake_case : int = 64 , _snake_case : int = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
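        # Rank documents by their relevance logit, then, within each one,
        # extract the best answer spans from the passage portion of the
        # sequence (everything after the second [SEP] token).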
a__ = reader_input['input_ids']
a__ , a__ , a__ = reader_output[:3]
a__ = len(_snake_case )
a__ = sorted(range(_snake_case ) , reverse=_snake_case , key=relevance_logits.__getitem__ )
a__ = []
for doc_id in sorted_docs:
a__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
a__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
a__ = sequence_ids.index(self.pad_token_id )
else:
a__ = len(_snake_case )
a__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_snake_case , top_spans=_snake_case , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_snake_case , start_index=_snake_case , end_index=_snake_case , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_snake_case ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowerCAmelCase ( self : Dict , _snake_case : List[int] , _snake_case : List[int] , _snake_case : int , _snake_case : int , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
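        # Score every candidate span of up to max_answer_length tokens as
        # start_logit + end_logit, sort by score, and keep the top spans that
        # do not overlap a span already chosen.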
a__ = []
for start_index, start_score in enumerate(_snake_case ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
a__ = sorted(_snake_case , key=lambda _snake_case : x[1] , reverse=_snake_case )
a__ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
a__ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_snake_case ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a )
class SCREAMING_SNAKE_CASE ( a , a ):
"""simple docstring"""
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : Any =READER_PRETRAINED_VOCAB_FILES_MAP
a_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Union[str, Any] =READER_PRETRAINED_INIT_CONFIGURATION
a_ : Tuple =["input_ids", "attention_mask"]
| 232 | 0 |
import pprint
import requests
UpperCamelCase__ = 'https://zenquotes.io/api'
def lowerCamelCase ( ):
    return requests.get(API_ENDPOINT_URL + '/today' , timeout=10 ).json()
def lowerCamelCase ( ):
    return requests.get(API_ENDPOINT_URL + '/random' , timeout=10 ).json()
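# Editor's note (an assumption about a third-party API, verify before relying
# on it): at the time of writing both endpoints return a JSON list of quote
# objects such as [{"q": "<quote>", "a": "<author>", "h": "<html>"}], so
# random_quotes()[0]["q"] would be the quote text.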
if __name__ == "__main__":
UpperCamelCase__ = random_quotes()
pprint.pprint(response)
| 709 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'facebook/deit-base-distilled-patch16-224': (
        'https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class a ( lowercase ):
UpperCamelCase : List[Any] = """deit"""
def __init__( self , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3_072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.02 , UpperCamelCase_=1E-12 , UpperCamelCase_=224 , UpperCamelCase_=16 , UpperCamelCase_=3 , UpperCamelCase_=True , UpperCamelCase_=16 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
UpperCAmelCase__ : int = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : int = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_act
UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Optional[Any] = layer_norm_eps
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Optional[int] = patch_size
UpperCAmelCase__ : Union[str, Any] = num_channels
UpperCAmelCase__ : Optional[Any] = qkv_bias
UpperCAmelCase__ : Dict = encoder_stride
class a ( lowercase ):
UpperCamelCase : Optional[int] = version.parse("""1.11""" )
@property
def __snake_case ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __snake_case ( self ):
return 1E-4
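# Usage sketch (editor's note; `DeiTOnnxConfig` is the upstream name of the
# ONNX config class above):
#   onnx_config = DeiTOnnxConfig(DeiTConfig())
#   onnx_config.inputs               # OrderedDict mapping "pixel_values" to its axes
#   onnx_config.atol_for_validation  # 1e-4, the tolerance used to validate the export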
| 254 | 0 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( a ):
"""simple docstring"""
A_ = (DDIMParallelScheduler,)
A_ = (('eta', 0.0), ('num_inference_steps', 5_0))
def snake_case_( self , **_lowerCamelCase )-> List[Any]:
lowercase__ = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**_lowerCamelCase )
return config
def snake_case_( self , **_lowerCamelCase )-> List[str]:
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_lowerCamelCase )
lowercase__ = scheduler_class(**_lowerCamelCase )
lowercase__ , lowercase__ = 1_0, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCamelCase )
for t in scheduler.timesteps:
lowercase__ = model(_lowerCamelCase , _lowerCamelCase )
lowercase__ = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample
return sample
def snake_case_( self )-> Optional[Any]:
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def snake_case_( self )-> Optional[int]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowerCamelCase )
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1 )
lowercase__ = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def snake_case_( self )-> List[Any]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def snake_case_( self )-> Tuple:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def snake_case_( self )-> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def snake_case_( self )-> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def snake_case_( self )-> str:
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_lowerCamelCase )
def snake_case_( self )-> Union[str, Any]:
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_lowerCamelCase )
def snake_case_( self )-> Optional[Any]:
self.check_over_configs(thresholding=_lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , )
def snake_case_( self )-> Tuple:
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=_lowerCamelCase )
def snake_case_( self )-> int:
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=_lowerCamelCase , num_inference_steps=_lowerCamelCase )
def snake_case_( self )-> List[Any]:
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_lowerCamelCase , eta=_lowerCamelCase )
def snake_case_( self )-> Optional[Any]:
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.1_4_7_7_1 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.3_2_4_6_0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.0_2 ) ) < 1e-5
def snake_case_( self )-> Tuple:
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_lowerCamelCase )
lowercase__ , lowercase__ = 1_0, 0.0
scheduler.set_timesteps(_lowerCamelCase )
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0 )
lowercase__ = torch.arange(_lowerCamelCase )[0:3, None].repeat(1 , _lowerCamelCase )
lowercase__ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowercase__ = scheduler.batch_step_no_noise(_lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _lowerCamelCase )
lowercase__ = torch.sum(torch.abs(_lowerCamelCase ) )
lowercase__ = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1e-2
assert abs(result_mean.item() - 0.4_9_8_2 ) < 1e-3
def snake_case_( self )-> str:
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(_lowerCamelCase ) )
lowercase__ = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_7_2.0_0_6_7 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_9_6_7 ) < 1e-3
def snake_case_( self )-> Any:
lowercase__ = self.full_loop(prediction_type='''v_prediction''' )
lowercase__ = torch.sum(torch.abs(_lowerCamelCase ) )
lowercase__ = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 5_2.5_3_0_2 ) < 1e-2
assert abs(result_mean.item() - 0.0_6_8_4 ) < 1e-3
def snake_case_( self )-> Any:
# We specify different beta, so that the first alpha is 0.99
lowercase__ = self.full_loop(set_alpha_to_one=_lowerCamelCase , beta_start=0.0_1 )
lowercase__ = torch.sum(torch.abs(_lowerCamelCase ) )
lowercase__ = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_4_9.8_2_9_5 ) < 1e-2
assert abs(result_mean.item() - 0.1_9_5_1 ) < 1e-3
def snake_case_( self )-> List[Any]:
# We specify different beta, so that the first alpha is 0.99
lowercase__ = self.full_loop(set_alpha_to_one=_lowerCamelCase , beta_start=0.0_1 )
lowercase__ = torch.sum(torch.abs(_lowerCamelCase ) )
lowercase__ = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_4_9.0_7_8_4 ) < 1e-2
assert abs(result_mean.item() - 0.1_9_4_1 ) < 1e-3
| 161 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase : float , lowercase : float ) ->float:
"""simple docstring"""
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
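# Rough numerical check (editor's sketch): for water at about 20 °C,
# density ≈ 998 kg/m^3 and bulk modulus ≈ 2.15e9 Pa, so the formula gives
# (2.15e9 / 998) ** 0.5 ≈ 1467 m/s, close to the tabulated ~1480 m/s for
# the speed of sound in water.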
if __name__ == "__main__":
import doctest
doctest.testmod()
| 161 | 1 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : str = tempfile.mkdtemp()
UpperCAmelCase : str = 8
# DPR tok
UpperCAmelCase : int = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = os.path.join(_SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
UpperCAmelCase : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
UpperCAmelCase : Tuple = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCAmelCase : List[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase : int = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def SCREAMING_SNAKE_CASE ( self ) -> DPRContextEncoderTokenizer:
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def SCREAMING_SNAKE_CASE ( self ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Dict = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.get_dummy_dataset()
UpperCAmelCase : List[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
UpperCAmelCase : Tuple = dataset
UpperCAmelCase : List[str] = RagRetriever(
_SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[Any] = self.get_dummy_dataset()
UpperCAmelCase : Any = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , )
if from_disk:
UpperCAmelCase : str = os.path.join(self.tmpdirname , """dataset""" )
UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , """index.faiss""" )
dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) )
dataset.drop_index("""embeddings""" )
dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) )
del dataset
UpperCAmelCase : List[Any] = RagRetriever(
_SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
UpperCAmelCase : Optional[Any] = RagRetriever(
_SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _SCREAMING_SNAKE_CASE ) , )
return retriever
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[str] = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" )
pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) )
UpperCAmelCase : Dict = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" )
UpperCAmelCase : Any = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
pickle.dump(_SCREAMING_SNAKE_CASE , open(_SCREAMING_SNAKE_CASE , """wb""" ) )
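        # Together these files mimic the legacy DPR retrieval layout: a serialized
        # faiss index (*.index.dpr), its id metadata (*.index_meta.dpr), and a
        # pickled id -> [text, title] passage map.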
UpperCAmelCase : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , )
UpperCAmelCase : str = RagRetriever(
_SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int = 1
UpperCAmelCase : List[Any] = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : List[Any] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
UpperCAmelCase : Optional[int] = self.get_dummy_dataset()
retriever.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : Optional[Any] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Tuple = 1
UpperCAmelCase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : Union[str, Any] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : List[str] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = 1
UpperCAmelCase : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : str = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : int = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Tuple = 1
UpperCAmelCase : List[Any] = self.get_dummy_legacy_index_retriever()
UpperCAmelCase : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : Dict = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""text"""] ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : str = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : Tuple = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
import torch
UpperCAmelCase : Dict = 1
UpperCAmelCase : List[Any] = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase : List[str] = [[5, 7], [10, 11]]
UpperCAmelCase : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : Union[str, Any] = retriever(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = (
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
UpperCAmelCase : Any = retriever(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , )
UpperCAmelCase : Dict = ( # noqa: F841
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
out["""doc_ids"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : int = self.get_dpr_ctx_encoder_tokenizer()
UpperCAmelCase : Any = 1
UpperCAmelCase : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = [[5, 7], [10, 11]]
UpperCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase : Tuple = retriever(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=_SCREAMING_SNAKE_CASE )
self.assertEqual(
            len(_SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , _SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
| 707 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : int = PriorTransformer
__lowerCAmelCase : Dict = 'hidden_states'
@property
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = 4
UpperCAmelCase : Optional[Any] = 8
UpperCAmelCase : Union[str, Any] = 7
UpperCAmelCase : Any = floats_tensor((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = floats_tensor((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=0 ) -> Any:
'''simple docstring'''
torch.manual_seed(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = 4
UpperCAmelCase : Any = 8
UpperCAmelCase : List[Any] = 7
UpperCAmelCase : Any = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
return (4, 8)
@property
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
return (4, 8)
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[int] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
UpperCAmelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Optional[Any] = PriorTransformer.from_pretrained(
"""hf-internal-testing/prior-dummy""" , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Tuple = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase : str = self.model_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase : int = ["""hidden_states""", """timestep"""]
self.assertListEqual(arg_names[:2] , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
UpperCAmelCase : Dict = model.to(_SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , """set_default_attn_processor""" ):
model.set_default_attn_processor()
UpperCAmelCase : str = self.get_dummy_seed_input()
with torch.no_grad():
UpperCAmelCase : str = model(**_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase : Dict = output[0, :5].flatten().cpu()
print(_SCREAMING_SNAKE_CASE )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
UpperCAmelCase : Dict = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-2 ) )
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=77 , _SCREAMING_SNAKE_CASE=0 ) -> int:
'''simple docstring'''
torch.manual_seed(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : int = embedding_dim
UpperCAmelCase : Tuple = num_embeddings
UpperCAmelCase : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
UpperCAmelCase : Dict = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
model.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = self.get_dummy_seed_input(seed=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
UpperCAmelCase : Any = model(**_SCREAMING_SNAKE_CASE )[0]
assert list(sample.shape ) == [1, 768]
UpperCAmelCase : int = sample[0, :8].flatten().cpu()
print(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = torch.tensor(_SCREAMING_SNAKE_CASE )
assert torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
| 359 | 0 |
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
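    # A weighted disjoint-set (union-find): per root it tracks the size of
    # its set (set_counts) and a rank used to decide the merge direction,
    # while max_set records the largest set size seen so far.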
def __init__( self : List[str] ,A_ : list ) -> None:
A = set_counts
A = max(A_ )
A = len(A_ )
A = [1] * num_sets
A = list(range(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> bool:
A = self.get_parent(A_ )
A = self.get_parent(A_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
A = 0
A = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
A = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
A = 0
A = src_parent
A = self.set_counts[src_parent]
A = max(self.max_set ,A_ )
return True
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : int ) -> int:
if self.parents[disj_set] == disj_set:
return disj_set
A = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
| 91 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
"""simple docstring"""
def __init__( self : Optional[Any] , snake_case_ : List[str]=2 , snake_case_ : Optional[int]=3 , snake_case_ : Union[str, Any]=6_4 , snake_case_ : Optional[Any]=None ):
'''simple docstring'''
snake_case__ : List[str] = np.random.default_rng(snake_case_ )
snake_case__ : int = length
snake_case__ : Tuple = rng.normal(size=(length,) ).astype(np.floataa )
snake_case__ : Optional[Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ):
'''simple docstring'''
return self.length
def __getitem__( self : List[str] , snake_case_ : int ):
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : int , snake_case_ : str=0 , snake_case_ : Optional[Any]=0 , snake_case_ : Tuple=False ):
'''simple docstring'''
super().__init__()
snake_case__ : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case__ : Any = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case__ : int = True
def __magic_name__ ( self : int , snake_case_ : str=None ):
'''simple docstring'''
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
snake_case__ : str = False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , snake_case_ : Tuple=0 , snake_case_ : int=0 , snake_case_ : int=False ):
'''simple docstring'''
super().__init__()
snake_case__ : Tuple = torch.nn.Parameter(torch.tensor(snake_case_ ).float() )
snake_case__ : int = torch.nn.Parameter(torch.tensor(snake_case_ ).float() )
snake_case__ : Union[str, Any] = True
def __magic_name__ ( self : Union[str, Any] , snake_case_ : List[str]=None ):
'''simple docstring'''
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
snake_case__ : List[Any] = False
return x * self.a + self.b
def _a ( __lowerCAmelCase : Any , __lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
snake_case__ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case__ : Optional[int] = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
snake_case__ : List[Any] = load_dataset('''csv''' , data_files=__lowerCAmelCase )
snake_case__ : Union[str, Any] = datasets['''train'''].unique('''label''' )
snake_case__ : Optional[Any] = {v: i for i, v in enumerate(__lowerCAmelCase )}
def tokenize_function(__lowerCAmelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : Union[str, Any] = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
if "label" in examples:
snake_case__ : List[Any] = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case__ : List[Any] = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(__lowerCAmelCase : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return tokenizer.pad(__lowerCAmelCase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
snake_case__ : str = DataLoader(tokenized_datasets['''train'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=2 )
snake_case__ : List[Any] = DataLoader(tokenized_datasets['''validation'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 347 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _a ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any]=7 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : str=3_0 , __UpperCamelCase : List[str]=4_0_0 , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : str=None , __UpperCamelCase : Tuple=0.9 , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : int=True , __UpperCamelCase : List[Any]=[0.5, 0.5, 0.5] , __UpperCamelCase : str=[0.5, 0.5, 0.5] , )->Dict:
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 3_0}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 702 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _a ( yaml.SafeLoader):
"""simple docstring"""
def lowercase__ ( self : List[str] , __UpperCamelCase : Any )->List[Any]:
_UpperCAmelCase = [self.constructed_objects[key_node] for key_node, _ in node.value]
_UpperCAmelCase = [tuple(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else key for key in keys]
_UpperCAmelCase = Counter(__UpperCamelCase )
_UpperCAmelCase = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
def lowercase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str=False )->Dict:
_UpperCAmelCase = super().construct_mapping(__UpperCamelCase , deep=__UpperCamelCase )
self._check_no_duplicates_on_constructed_node(__UpperCamelCase )
return mapping
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
_UpperCAmelCase = full_content[1:].index('''---''' ) + 1
_UpperCAmelCase = '''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(_SCREAMING_SNAKE_CASE )
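# Illustrative example (added, not in the original module): the helper above splits
# a dataset card into its YAML front matter and its Markdown body.
_EXAMPLE_README = "---\nlicense: mit\n---\n# My dataset"
assert _split_yaml_from_readme(_EXAMPLE_README) == ("license: mit", "# My dataset")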
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
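# Illustrative round-trip (added): metadata keys with dashes in YAML map to
# underscore field names on `DatasetMetadata`, and serializing restores the dash.
_example_metadata = DatasetMetadata.from_yaml_string("train-eval-index:\n- config: default\n")
assert "train_eval_index" in _example_metadata
assert "train-eval-index" in _example_metadata.to_yaml_string()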
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 95 | 0 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 35 |
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
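# Worked example (added): solution(n) is the central binomial coefficient C(2n, n).
# For the default n = 20 this is C(40, 20) = 137846528820, the number of lattice
# paths through a 20 x 20 grid.
assert solution(1) == 2
assert solution(20) == 137846528820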
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
a_ :str = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 | 1 |
import random


def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
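# Quick demonstration (added, not part of the original module): numbers whose
# smallest factor exceeds the trial-division table fall through to rabin_miller.
# Miller-Rabin is probabilistic, so the composite check below can in principle
# fail, with probability at most 4**-5 per call.
if __name__ == "__main__":
    assert is_prime_low_num(104729)  # the 10000th prime
    assert not is_prime_low_num(1009 * 1013)  # composite with no factor <= 997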
| 572 |
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.")
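# Worked example (added): 145 is a Krishnamurthy number since
# 1! + 4! + 5! = 1 + 24 + 120 = 145; the base-10 sequence is 1, 2, 145, 40585.
assert krishnamurthy(145)
assert not krishnamurthy(144)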
| 572 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
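# Hedged usage sketch (added): in practice this formatter is reached through the
# public `Dataset.set_format` / `with_format` API rather than instantiated directly.
# The sketch assumes the `datasets` and `torch` packages are importable.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch")
#   ds[0]["x"]     # -> tensor([1., 2.])
#   ds["x"].shape  # -> torch.Size([2, 2]), consolidated via torch.stack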
| 527 |
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
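# Small self-contained demonstration (added, not part of the original script) of
# the fused-QKV split performed above: a projection of depth 3*d is cut into three
# equal chunks along dim 0.
def _demo_qkv_split():
    fused = torch.arange(12.0).reshape(6, 2)  # depth 6 -> three 2x2 chunks
    k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert k.shape == v.shape == q.shape == (2, 2)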
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 527 | 1 |
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            down = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, down)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            down = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, down)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row.copy()  # copy, not alias, so the next pass reads the finished row

    return largest_square_area
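# Complexity notes (added): the plain top-down recursion is exponential in the
# matrix size, the memoized variant and the bottom-up table run in O(rows * cols)
# time with O(rows * cols) space, and the last variant keeps only two rows for
# O(cols) extra space. A quick cross-check of all four implementations:
def _demo_largest_square():
    mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    results = {
        largest_square_area_in_matrix_top_down_approach(3, 3, mat),
        largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, mat),
        largest_square_area_in_matrix_bottom_up(3, 3, mat),
        largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, mat),
    }
    assert results == {2}  # the largest all-ones square has side length 2


_demo_largest_square()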
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 719 |
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
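# Worked examples (added): the integral reproduces the factorial shift
# gamma(n) = (n - 1)! and the half-integer value gamma(1/2) = sqrt(pi); a loose
# tolerance is used because quad returns a numerical approximation.
assert abs(gamma(5) - 24.0) < 1e-4
assert abs(gamma(0.5) - math.sqrt(math.pi)) < 1e-4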
if __name__ == "__main__":
from doctest import testmod
testmod()
| 252 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
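# Hedged usage sketch (added): configs are plain containers, so the class above can
# be instantiated directly; `from_pretrained` (inherited from PretrainedConfig)
# loads the published defaults instead.
#
#   config = LxmertConfig(num_qa_labels=100)
#   config.num_hidden_layers  # -> {"vision": 5, "cross_encoder": 5, "language": 9}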
| 70 |
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
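# Migration sketch (added): new code should construct the image processor directly;
# the shim above only exists so that old imports and checkpoints keep working.
#
#   from transformers import LayoutLMv2ImageProcessor
#   processor = LayoutLMv2ImageProcessor()  # instead of LayoutLMv2FeatureExtractor()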
| 302 | 0 |
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
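# Hedged usage sketch (added): outside the test suite, the same reader is normally
# reached through `load_dataset("text", data_files=...)`, which yields a
# DatasetDict with one "text" column holding one row per line of the input file.
#
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "my_corpus.txt"})
#   ds["train"][0]  # -> {"text": "<first line of my_corpus.txt>"}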
| 703 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path / """cache"""
snake_case_ : Optional[int] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : int = {"""text""": """string"""}
snake_case_ : Any = features.copy() if features else default_expected_features
snake_case_ : List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Union[str, Any] = tmp_path / """cache"""
snake_case_ : Optional[Any] = {"""text""": """string"""}
snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : List[str] = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : str = [text_path]
snake_case_ : List[str] = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
snake_case_ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : int = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Tuple = tmp_path / """cache"""
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : int = features.copy() if features else default_expected_features
snake_case_ : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if split:
snake_case_ : Union[str, Any] = {split: text_path}
else:
snake_case_ : Union[str, Any] = """train"""
snake_case_ : int = {"""train""": text_path, """test""": text_path}
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : Tuple = {"""text""": """string"""}
snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 48 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase__ ( ) -> Optional[Any]:
lowerCamelCase_ = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
lowerCamelCase_ = Dataset.from_dict(_lowerCamelCase )
return dataset
class a ( __snake_case ):
def UpperCamelCase ( self : Union[str, Any] ) -> Any:
lowerCamelCase_ = get_dataset()
lowerCamelCase_ = make_duplicate_clusters(__SCREAMING_SNAKE_CASE , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCamelCase_ = get_dataset()
lowerCamelCase_ , lowerCamelCase_ = deduplicate_dataset(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
print(__SCREAMING_SNAKE_CASE )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , __SCREAMING_SNAKE_CASE )
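# Background sketch (added): `make_duplicate_clusters` relies on MinHash to
# approximate Jaccard similarity, so near-identical documents clear the 0.85
# threshold used above. A standalone illustration with the `datasketch` library
# (an assumption -- the tested module may implement the signatures differently):
#
#   from datasketch import MinHash
#   m1, m2 = MinHash(num_perm=128), MinHash(num_perm=128)
#   for tok in ("a " * 20).split():
#       m1.update(tok.encode("utf-8"))
#   for tok in ("a " * 30).split():
#       m2.update(tok.encode("utf-8"))
#   m1.jaccard(m2)  # close to 1.0, since both token sets collapse to {"a"}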
| 549 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str=13 , __SCREAMING_SNAKE_CASE : int=30 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=32 , __SCREAMING_SNAKE_CASE : List[str]=5 , __SCREAMING_SNAKE_CASE : List[str]=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=10 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase_ = (image_size // patch_size) ** 2
lowerCamelCase_ = num_patches + 1
def UpperCamelCase ( self : Dict ) -> Union[str, Any]:
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int ) -> Any:
lowerCamelCase_ = ViTMSNModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = ViTMSNForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print('Labels: {labels}' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = ViTMSNForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self : List[str] ) -> Tuple:
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a ( __snake_case , __snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE : str = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Dict = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : str = False
def UpperCamelCase ( self : List[Any] ) -> List[str]:
lowerCamelCase_ = ViTMSNModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
pass
def UpperCamelCase ( self : List[Any] ) -> List[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCamelCase ( self : Optional[int] ) -> Any:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCamelCase ( self : int ) -> List[Any]:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = ViTMSNModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( ) -> List[Any]:
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self : Optional[int] ) -> Any:
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def UpperCamelCase ( self : Dict ) -> Any:
torch.manual_seed(2 )
lowerCamelCase_ = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 549 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
"""Salesforce/blip-vqa-capfit-large""": (
"""https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-base""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-large""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
),
"""Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""",
"""Salesforce/blip-itm-large-flikr""": (
"""https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"""
),
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''blip_text_model'''
def __init__( self : Optional[int] , _UpperCAmelCase : Dict=30524 , _UpperCAmelCase : Union[str, Any]=768 , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Tuple=3072 , _UpperCAmelCase : List[Any]=768 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Tuple=8 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Optional[Any]=1e-12 , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : int=30522 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : str=102 , _UpperCAmelCase : int=True , _UpperCAmelCase : Tuple=True , **_UpperCAmelCase : List[str] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , sep_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = encoder_hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = projection_dim
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = is_decoder
UpperCAmelCase_ = use_cache
@classmethod
def lowercase__ ( cls : Any , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Optional[Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
UpperCAmelCase_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''blip_vision_model'''
def __init__( self : Optional[int] , _UpperCAmelCase : List[Any]=768 , _UpperCAmelCase : Dict=3072 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : List[str]=384 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Union[str, Any]=1e-10 , **_UpperCAmelCase : Optional[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = projection_dim
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = hidden_act
@classmethod
def lowercase__ ( cls : str , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
UpperCAmelCase_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''blip'''
UpperCamelCase = True
def __init__( self : Tuple , _UpperCAmelCase : Any=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : int=512 , _UpperCAmelCase : Dict=2.6592 , _UpperCAmelCase : int=256 , **_UpperCAmelCase : str , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if text_config is None:
UpperCAmelCase_ = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
UpperCAmelCase_ = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
UpperCAmelCase_ = BlipTextConfig(**_UpperCAmelCase )
UpperCAmelCase_ = BlipVisionConfig(**_UpperCAmelCase )
UpperCAmelCase_ = self.vision_config.hidden_size
UpperCAmelCase_ = projection_dim
UpperCAmelCase_ = logit_scale_init_value
UpperCAmelCase_ = 1.0
UpperCAmelCase_ = 0.02
UpperCAmelCase_ = image_text_hidden_size
@classmethod
def lowercase__ ( cls : List[Any] , _UpperCAmelCase : BlipTextConfig , _UpperCAmelCase : BlipVisionConfig , **_UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = self.text_config.to_dict()
UpperCAmelCase_ = self.vision_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
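# Hedged usage sketch (added): the composite config is usually built from its two
# sub-configs, mirroring `from_text_vision_configs` above.
#
#   text_config = BlipTextConfig(vocab_size=30524)
#   vision_config = BlipVisionConfig(image_size=384)
#   config = BlipConfig.from_text_vision_configs(text_config, vision_config)
#   config.text_config.encoder_hidden_size == config.vision_config.hidden_size  # True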
| 14 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class OwlViTTextConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''owlvit_text_model'''

    def __init__( self , vocab_size=49408 , hidden_size=512 , intermediate_size=2048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=49406 , eos_token_id=49407 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''owlvit_vision_model'''

    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''owlvit'''
    is_composition = True

    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , return_dict=True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the OwlViTVisionConfig with default values." )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )

    @classmethod
    def from_text_vision_configs( cls , text_config: Dict , vision_config: Dict , **kwargs ) -> "OwlViTConfig":
        '''simple docstring'''
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict , **kwargs )

    def to_dict( self ) -> Dict[str, Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig( OnnxConfig ):
    '''simple docstring'''

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ] )

    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-4

    def generate_dummy_inputs( self , processor: "ProcessorMixin" , batch_size: int = -1 , seq_length: int = -1 , framework: Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 14
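# Editor's illustrative sketch (not in the original source): composing the two
# sub-configs above into a full OwlViTConfig. The parameter values are
# arbitrary demo choices, not values taken from the original file.
def _demo_owlvit_composite_config():
    text_cfg = OwlViTTextConfig(hidden_size=512, num_attention_heads=8)
    vision_cfg = OwlViTVisionConfig(hidden_size=768, patch_size=32)
    # from_text_vision_configs expects plain dicts and stitches them into one
    # serializable composite config.
    cfg = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
    return cfg.to_dict()["projection_dim"]  # 512, the default set in __init__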
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs( self ):
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
"""simple docstring"""
lowerCAmelCase__ = SwinvaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
"""simple docstring"""
lowerCAmelCase__ = SwinvaForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = SwinvaForMaskedImageModeling(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
"""simple docstring"""
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = SwinvaForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCAmelCase__ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SwinvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
"""simple docstring"""
        self.model_tester = SwinvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SwinvaConfig , embed_dim=37 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(__magic_name__ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCAmelCase__ = outputs.attentions
lowerCAmelCase__ = len(self.model_tester.depths )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = config.window_size**2
lowerCAmelCase__ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCAmelCase__ = len(__magic_name__ )
# Check attention is always last and order is fine
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
if hasattr(self.model_tester , "num_hidden_states_types" ):
lowerCAmelCase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase__ = 2
self.assertEqual(out_len + added_hidden_states , len(__magic_name__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
"""simple docstring"""
lowerCAmelCase__ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
# Swinv2 has a different seq_length
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCAmelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
            batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
            reshaped_hidden_states = (
                reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
            )
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = 3
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = SwinvaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = _config_zero_init(__magic_name__ )
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class SwinvaModelIntegrationTest( unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
__magic_name__ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowerCAmelCase__ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**__magic_name__ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
lowerCAmelCase__ = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
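# Editor's illustrative sketch (not in the original source): the sequence-length
# and channel-dimension arithmetic the model tests above assert against,
# reduced to pure Python. It mirrors the formulas used in create_and_check_model.
def _demo_swin_stage_shapes(image_size=32, patch_size=2, embed_dim=16, depths=(1, 2, 1)):
    num_patches = (image_size // patch_size) ** 2        # tokens after patch embedding
    num_merges = len(depths) - 1                         # one 2x2 patch merge per stage boundary
    expected_seq_len = num_patches // (4 ** num_merges)  # each merge shrinks the sequence 4x
    expected_dim = int(embed_dim * 2 ** num_merges)      # ...and doubles the channel dim
    return expected_seq_len, expected_dim                # (16, 64) for these defaults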
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput ):
    '''simple docstring'''

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
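# Editor's illustrative sketch (not in the original source): a minimal,
# dependency-free analogue of the optional-import guard used above. When the
# heavy dependencies are missing, a stub that raises a clear error is exported
# instead of the real pipeline class.
def _demo_optional_export(have_torch_and_transformers: bool):
    if have_torch_and_transformers:
        return "TextToVideoSDPipeline"  # stand-in for the real class
    class _DummyPipeline:
        def __init__(self, *args, **kwargs):
            raise ImportError("TextToVideoSDPipeline requires `torch` and `transformers`.")
    return _DummyPipeline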
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = BertJapaneseTokenizer
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
    def setUp( self ):
super().setUp()
lowerCAmelCase__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[Any]:
lowerCAmelCase__ = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase__ = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
lowerCAmelCase__ , lowerCAmelCase__ = self.get_input_output_texts(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
return text, ids
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass # TODO add if relevant
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass # TODO add if relevant
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass # TODO add if relevant
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(lowerCamelCase_ )
lowerCAmelCase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase__ = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(lowerCamelCase_ , '''wb''' ) as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(lowerCamelCase_ , '''rb''' ) as handle:
lowerCAmelCase__ = pickle.load(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
try:
lowerCAmelCase__ = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
try:
lowerCAmelCase__ = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = MecabTokenizer(do_lower_case=lowerCamelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
try:
lowerCAmelCase__ = MecabTokenizer(
do_lower_case=lowerCamelCase_ , normalize_text=lowerCamelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = MecabTokenizer(normalize_text=lowerCamelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(lowerCamelCase_ )
lowerCAmelCase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase__ = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(lowerCamelCase_ , '''wb''' ) as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(lowerCamelCase_ , '''rb''' ) as handle:
lowerCAmelCase__ = pickle.load(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@require_sudachi
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = SudachiTokenizer(do_lower_case=lowerCamelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = SudachiTokenizer(normalize_text=lowerCamelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = SudachiTokenizer(trim_whitespace=lowerCamelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(lowerCamelCase_ )
lowerCAmelCase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase__ = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(lowerCamelCase_ , '''wb''' ) as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(lowerCamelCase_ , '''rb''' ) as handle:
lowerCAmelCase__ = pickle.load(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@require_jumanpp
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = JumanppTokenizer(do_lower_case=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = JumanppTokenizer(normalize_text=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = JumanppTokenizer(trim_whitespace=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowerCAmelCase__ = {}
for i, token in enumerate(lowerCamelCase_ ):
lowerCAmelCase__ = i
lowerCAmelCase__ = WordpieceTokenizer(vocab=lowerCamelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowerCAmelCase__ = tokenizer.subword_tokenizer
lowerCAmelCase__ = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(lowerCamelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowerCAmelCase__ = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(lowerCamelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
lowerCAmelCase__ = tokenizer.encode('''ありがとう。''' , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = BertJapaneseTokenizer
lowercase__ : List[str] = False
    def setUp( self ):
super().setUp()
lowerCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> List[Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase__ = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass # TODO add if relevant
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass # TODO add if relevant
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass # TODO add if relevant
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
lowerCAmelCase__ = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
lowerCamelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase__ = {}
for i, token in enumerate(lowerCamelCase_ ):
lowerCAmelCase__ = i
lowerCAmelCase__ = CharacterTokenizer(vocab=lowerCamelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
lowerCAmelCase__ = tokenizer.encode('''ありがとう。''' , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = '''cl-tohoku/bert-base-japanese'''
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
class BertTokenizerMismatchTest( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(lowerCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
lowerCAmelCase__ = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
                ''' is called from.''' ) )
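# Editor's illustrative sketch (not in the original source): the greedy
# longest-match-first strategy that the WordpieceTokenizer assertions above
# rely on, reduced to a standalone function.
def _demo_wordpiece(token, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(token):
        end, match = len(token), None
        while start < end:
            piece = token[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the ## prefix
            if piece in vocab:
                match = piece
                break
            end -= 1
        if match is None:
            return [unk]  # no piece matched: the whole token becomes [UNK]
        pieces.append(match)
        start = end
    return pieces
# e.g. _demo_wordpiece("こんばんは", {"こん", "##ばんは"}) == ["こん", "##ばんは"]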
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = '''RegNetConfig'''
# Base docstring
_CHECKPOINT_FOR_DOC = '''facebook/regnet-y-040'''
_EXPECTED_OUTPUT_SHAPE = [1, 1_088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''facebook/regnet-y-040'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer( nn.Module ):
    '''simple docstring'''

    def __init__( self , in_channels: int , out_channels: int , kernel_size: int = 3 , stride: int = 1 , groups: int = 1 , activation: Optional[str] = "relu" , ) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , groups=groups , bias=False , )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward( self , hidden_state: Tensor ) -> Tensor:
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetEmbeddings( nn.Module ):
    '''simple docstring'''

    def __init__( self , config: RegNetConfig ) -> None:
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
        self.num_channels = config.num_channels

    def forward( self , pixel_values: Tensor ) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class RegNetShortCut( nn.Module ):
    '''simple docstring'''

    def __init__( self , in_channels: int , out_channels: int , stride: int = 2 ) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward( self , input: Tensor ) -> Tensor:
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class RegNetSELayer( nn.Module ):
    '''simple docstring'''

    def __init__( self , in_channels: int , reduced_channels: int ) -> None:
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels , reduced_channels , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(reduced_channels , in_channels , kernel_size=1 ) , nn.Sigmoid() , )

    def forward( self , hidden_state: Tensor ) -> Tensor:
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state )
        attention = self.attention(pooled )
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer( nn.Module ):
    '''simple docstring'''

    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 1 ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[config.hidden_act]

    def forward( self , hidden_state: Tensor ) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetYLayer( nn.Module ):
    '''simple docstring'''

    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 1 ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[config.hidden_act]

    def forward( self , hidden_state: Tensor ) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetStage( nn.Module ):
    '''simple docstring'''

    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 2 , depth: int = 2 , ) -> None:
        super().__init__()
        layer = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config , in_channels , out_channels , stride=stride , ) , *[layer(config , out_channels , out_channels ) for _ in range(depth - 1 )] , )

    def forward( self , hidden_state: Tensor ) -> Tensor:
        hidden_state = self.layers(hidden_state )
        return hidden_state
class RegNetEncoder( nn.Module ):
    '''simple docstring'''

    def __init__( self , config: RegNetConfig ) -> None:
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth ) )

    def forward( self , hidden_state: Tensor , output_hidden_states: bool = False , return_dict: bool = True ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
class RegNetPreTrainedModel( PreTrainedModel ):
    '''simple docstring'''

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights( self , module ) -> None:
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def _set_gradient_checkpointing( self , module , value=False ) -> None:
        if isinstance(module , RegNetModel ):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel( RegNetPreTrainedModel ):
    '''simple docstring'''

    def __init__( self , config ) -> None:
        super().__init__(config )
        self.config = config
        self.embedder = RegNetEmbeddings(config )
        self.encoder = RegNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values , output_hidden_states = None , return_dict = None ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification( RegNetPreTrainedModel ):
    '''simple docstring'''

    def __init__( self , config ) -> None:
        super().__init__(config )
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
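# Editor's illustrative sketch (not in the original source): an end-to-end shape
# check for the classification model above. Config values are illustrative demo
# choices; running it requires torch and a RegNetConfig import in this module.
def _demo_regnet_classification():
    config = RegNetConfig(num_labels=10)
    model = RegNetForImageClassification(config)
    model.eval()
    pixel_values = torch.randn(1, config.num_channels, 224, 224)
    with torch.no_grad():
        logits = model(pixel_values).logits
    return logits.shape  # torch.Size([1, 10])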
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip("\"") for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
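# Editor's worked example (not in the original source): "SKY" -> 19 + 11 + 25
# = 55 = t(10), so it counts as a triangle word under the scheme above.
def _demo_word_value(word="SKY"):
    value = sum(ord(ch) - 64 for ch in word)    # A=1 ... Z=26
    return value, value in TRIANGULAR_NUMBERS   # (55, True)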
if __name__ == "__main__":
print(solution())
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
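# Editor's addition: a hedged note on the lazy-module pattern above. Nothing in
# `_import_structure` is imported eagerly; `_LazyModule` resolves attributes on
# first access, e.g. (usage illustrative):
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseTokenizer
# only triggers the real `tokenization_gpt_neox_japanese` import at that point.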
| 380 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    '''simple docstring'''
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name):
    '''simple docstring'''
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
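# Editor's addition: a hedged example of rename_key on a sample fairseq-style
# key; the input string is an illustrative assumption.
_example = rename_key("module.v.blocks.0.attn.proj.weight")
assert _example == "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"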
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
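# Editor's addition: a hedged sketch of the fused-qkv split above; a hidden
# size of 4 is an illustrative assumption.
import torch
_fused = torch.arange(3 * 4 * 4, dtype=torch.float32).reshape(3 * 4, 4)
_q, _k, _v = _fused[:4, :], _fused[4 : 2 * 4, :], _fused[-4:, :]
assert _q.shape == _k.shape == _v.shape == (4, 4)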
def remove_keys(state_dict):
    '''simple docstring'''
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)
    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
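    # Editor's addition: a hedged example invocation; the script file name and
    # dump path are illustrative assumptions.
    #   python convert_ast_checkpoint.py \
    #       --model_name ast-finetuned-audioset-10-10-0.4593 \
    #       --pytorch_dump_folder_path ./ast-converted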
| 719 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    '''simple docstring'''
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
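# Editor's addition: hedged usage of the timing decorator above; the decorated
# function is an illustrative assumption.
@get_duration
def _busy_sum():
    return sum(range(100_000))
# _busy_sum() now returns the elapsed wall-clock time in seconds, not the sum.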
def generate_examples(features, num_examples=100, seq_shapes=None):
    '''simple docstring'''
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data
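# Editor's addition: hedged usage with a toy schema; the feature names are
# illustrative assumptions.
_toy_features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("int32")})
_toy_rows = generate_examples(_toy_features, num_examples=2)
assert len(_toy_rows) == 2 and set(_toy_rows[0][1]) == {"text", "score"}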
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    '''simple docstring'''
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 13 | 0 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter=1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
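    # Editor's addition (hedged check): 120 admits the most right-triangle
    # solutions below 1000, the known Project Euler 39 answer.
    assert solution(1000) == 120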
| 89 |
"""simple docstring"""
import operator as op
SCREAMING_SNAKE_CASE_ = '''scaler.pt'''
SCREAMING_SNAKE_CASE_ = '''pytorch_model'''
SCREAMING_SNAKE_CASE_ = '''random_states'''
SCREAMING_SNAKE_CASE_ = '''optimizer'''
SCREAMING_SNAKE_CASE_ = '''scheduler'''
SCREAMING_SNAKE_CASE_ = '''pytorch_model.bin'''
SCREAMING_SNAKE_CASE_ = '''pytorch_model.bin.index.json'''
SCREAMING_SNAKE_CASE_ = '''model.safetensors'''
SCREAMING_SNAKE_CASE_ = '''model.safetensors.index.json'''
SCREAMING_SNAKE_CASE_ = '''1.10.2'''
SCREAMING_SNAKE_CASE_ = '''py38'''
SCREAMING_SNAKE_CASE_ = '''4.17.0'''
SCREAMING_SNAKE_CASE_ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
SCREAMING_SNAKE_CASE_ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
SCREAMING_SNAKE_CASE_ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
SCREAMING_SNAKE_CASE_ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
SCREAMING_SNAKE_CASE_ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
SCREAMING_SNAKE_CASE_ = '''2.0.1'''
SCREAMING_SNAKE_CASE_ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
SCREAMING_SNAKE_CASE_ = ['''default''', '''reduce-overhead''', '''max-autotune''']
SCREAMING_SNAKE_CASE_ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
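# Editor's addition: a hedged sketch of how a string-to-operator map like the
# one above is typically used for version gating; local names and version
# tuples are illustrative assumptions.
_compare = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
assert _compare['>=']((2, 0, 1), (1, 13, 0))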
# These are the args for `torch.distributed.launch` for pytorch < 1.9
SCREAMING_SNAKE_CASE_ = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
SCREAMING_SNAKE_CASE_ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
SCREAMING_SNAKE_CASE_ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 465 | 0 |
import operator
def strand_sort(arr, reverse=False, solution=None):
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
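    # Editor's addition (hedged note): strand_sort consumes its input in place.
    _data = [4, 3, 5, 1, 2]
    assert strand_sort(_data) == [1, 2, 3, 4, 5]
    assert _data == []  # the list has been emptied by the recursive pops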
| 225 | from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        raise NotImplementedError()
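# Editor's addition: a hedged minimal concrete command implementing the ABC
# above; the "echo" behavior is an illustrative assumption.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--message", default="hello")
    def run(self):
        print("echo")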
| 225 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 119 |
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    """simple docstring"""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
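    # Editor's addition (hedged check): 6 is a perfect number, so the sum of
    # its proper divisors equals 6.
    assert sum_of_divisors(6) == 6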
| 104 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
"""simple docstring"""
lowerCamelCase : str = ","
lowerCamelCase : Optional[str] = None
lowerCamelCase : Optional[Union[int, List[int], str]] = "infer"
lowerCamelCase : Optional[List[str]] = None
lowerCamelCase : Optional[List[str]] = None
lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] = None
lowerCamelCase : Optional[Union[List[int], List[str]]] = None
lowerCamelCase : Optional[str] = None
lowerCamelCase : bool = True
lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] = None
lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] = None
lowerCamelCase : Optional[list] = None
lowerCamelCase : Optional[list] = None
lowerCamelCase : bool = False
lowerCamelCase : Optional[Union[int, List[int]]] = None
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[Union[str, List[str]]] = None
lowerCamelCase : bool = True
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : bool = True
lowerCamelCase : Optional[str] = None
lowerCamelCase : str = "."
lowerCamelCase : Optional[str] = None
lowerCamelCase : str = '"'
lowerCamelCase : int = 0
lowerCamelCase : Optional[str] = None
lowerCamelCase : Optional[str] = None
lowerCamelCase : Optional[str] = None
lowerCamelCase : Optional[str] = None
lowerCamelCase : bool = True
lowerCamelCase : bool = True
lowerCamelCase : int = 0
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : Optional[str] = None
lowerCamelCase : int = 1_00_00
lowerCamelCase : Optional[datasets.Features] = None
lowerCamelCase : Optional[str] = "strict"
lowerCamelCase : Literal["error", "warn", "skip"] = "error"
lowerCamelCase : Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        return pd_read_csv_kwargs
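# Editor's addition: a hedged usage sketch of the config above; the builder
# config name "toy" is an illustrative assumption.
_toy_cfg = CsvConfig(name="toy", delimiter=";")
assert _toy_cfg.pd_read_csv_kwargs["sep"] == ";"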
class Csv(datasets.ArrowBasedBuilder):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
    def _cast_table(self, pa_table: pa.Table):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                raise | 382 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
    _import_structure["modeling_data2vec_text"] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
    _import_structure["modeling_data2vec_vision"] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 382 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
@property
    def dummy_uncond_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model
    def test_inference(self):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_inference(self):
        '''simple docstring'''
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 102 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""")
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""", None)
        self.latest_model_name = kwargs.get("""latest_model_name""", ONNX_WEIGHTS_NAME)
    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""")
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id, revision=None, cache_dir=None, force_download=False, use_auth_token=None, file_name=None, provider=None, sess_options=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split("""@""")) == 2:
            model_id, revision = model_id.split("""@""")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
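# Editor's addition: hedged usage of the wrapper above; the directory, file
# name, and input name are illustrative assumptions.
#   model = OnnxRuntimeModel.from_pretrained("./onnx_dir", file_name="model.onnx")
#   out = model(sample=np.zeros((1, 4), dtype=np.float32))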
| 532 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Dict = fairseq_model.state_dict()
snake_case_ : Any = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case_ : Tuple = None
for name, value in fairseq_dict.items():
snake_case_ : List[str] = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ : Dict = True
elif name.split(""".""" )[0] == "proj":
snake_case_ : List[Any] = fairseq_model.proj
snake_case_ : Tuple = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ : Union[str, Any] = True
if "*" in mapped_key:
snake_case_ : Union[str, Any] = name.split(__UpperCamelCase )[0].split(""".""" )[-2]
snake_case_ : Tuple = mapped_key.replace("""*""" , __UpperCamelCase )
if "weight_g" in name:
snake_case_ : Dict = """weight_g"""
elif "weight_v" in name:
snake_case_ : List[str] = """weight_v"""
elif "bias" in name:
snake_case_ : List[str] = """bias"""
elif "weight" in name:
snake_case_ : List[str] = """weight"""
else:
snake_case_ : Tuple = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(F'Unused weights: {unused_weights}' )
return proj_weight
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Dict = full_name.split("""conv_layers.""" )[-1]
snake_case_ : str = name.split(""".""" )
snake_case_ : Any = int(items[0] )
snake_case_ : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
snake_case_ : Dict = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
snake_case_ : Tuple = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
snake_case_ : Optional[int] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
snake_case_ : Dict = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCamelCase )
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    '''simple docstring'''
    with open(dict_path, """r""", encoding="""utf-8""") as f:
        lines = f.readlines()
    words = [line.split(""" """)[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
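# Editor's addition: a hedged, runnable example of create_vocab_dict on a tiny
# in-memory fairseq-style dict file; the words are illustrative assumptions.
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("hello 42\nworld 7\n")
assert create_vocab_dict(tmp.name) == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}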
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : Tuple , ):
'''simple docstring'''
snake_case_ : List[Any] = WavaVecaConfig.from_pretrained(__UpperCamelCase )
snake_case_ : Optional[Any] = SpeechaTextaConfig.from_pretrained(
__UpperCamelCase , vocab_size=__UpperCamelCase , decoder_layers=__UpperCamelCase , do_stable_layer_norm=__UpperCamelCase )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
snake_case_ , snake_case_ , snake_case_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
snake_case_ : Optional[Any] = model[0].eval()
# set weights for wav2vec2 encoder
snake_case_ : Any = WavaVecaModel(__UpperCamelCase )
snake_case_ : List[str] = recursively_load_weights_wavaveca(model.encoder , __UpperCamelCase )
snake_case_ : List[str] = SpeechaTextaForCausalLM(__UpperCamelCase )
snake_case_ , snake_case_ : Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__UpperCamelCase )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
snake_case_ : int = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
snake_case_ : Any = SpeechEncoderDecoderModel(encoder=__UpperCamelCase , decoder=__UpperCamelCase )
snake_case_ : Tuple = False
# add projection layer
snake_case_ : int = nn.Parameter(projection_layer.weight )
snake_case_ : Union[str, Any] = nn.Parameter(projection_layer.bias )
snake_case_ : Dict = create_vocab_dict(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase , """vocab.json""" ) , """w""" ) as fp:
json.dump(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase , """vocab.json""" ) )
tokenizer.save_pretrained(__UpperCamelCase )
snake_case_ : Dict = hf_wavavec.config.to_dict()
snake_case_ : List[str] = tokenizer.pad_token_id
snake_case_ : Union[str, Any] = tokenizer.bos_token_id
snake_case_ : str = tokenizer.eos_token_id
snake_case_ : str = """speech_to_text_2"""
snake_case_ : Any = """wav2vec2"""
snake_case_ : Dict = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
__lowerCAmelCase : Any = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
_lowerCamelCase = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 21 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path, articles):
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(sys, 'argv', testargs):
            run_generate()
            assert Path(output_file_name).exists()
# os.remove(Path(output_file_name))
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        text = {
            'en': ["Machine learning is great, isn't it?", 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / 'scores.json')
        reference_path = str(tmp_dir / 'val.target')
        _dump_articles(input_file_name, text['en'])
        _dump_articles(reference_path, text['de'])
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])
        with patch.object(sys, 'argv', testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [' num_beams | length_penalty', model, 'Best score args']
            un_expected_strings = ['Info']
            if "translation" in task:
                expected_strings.append('bleu')
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
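    # Editor's addition: hedged manual equivalent of the search being tested;
    # the file names are illustrative assumptions.
    #   python run_eval_search.py sshleifer/bart-tiny-random input.source out.txt \
    #       --score_path scores.json --task summarization \
    #       --search "num_beams=1:2 length_penalty=0.9:1.0"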
| 66 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )


@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
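    # Illustration (added; not part of the original class): unlike BERT, XLNet
    # puts its special tokens at the END of the sequence. For hypothetical ids
    # ids_a=[10, 11] and ids_b=[20] with sep id 99 and cls id 98, the two
    # methods above produce
    #   build_inputs_with_special_tokens      -> [10, 11, 99, 20, 99, 98]
    #   create_token_type_ids_from_sequences  -> [0, 0, 0, 1, 1, 2]
    # i.e. segment 0 for "A <sep>", segment 1 for "B <sep>", and the special
    # segment id 2 for the trailing <cls> token.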
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]


def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names into the transformers format."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
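# Worked example (added for clarity): for a shard named
# "layer_04-model_00-model_states.pt", the regex extracts 4, and the offset of
# 3 (the embedding and layernorm shards come first) maps a key such as
# "input_layernorm.weight" to "h.1.input_layernorm.weight".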
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
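# Example values (added): get_dtype_size(torch.float16) -> 2 and
# get_dtype_size(torch.int64) -> 8; torch.bool is special-cased as 1/8 byte so
# that the "total_size" entry of the shard index stays consistent.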
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
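# Minimal usage sketch (added; shapes and sizes are made up for illustration).
# Flax modules are configured with hyperparameters, then `init`/`apply` handle
# the parameters explicitly; note the channel-last (NHWC) layout used above.
#
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   x = jnp.zeros((1, 8, 8, 32))           # NHWC feature map
#   temb = jnp.zeros((1, 128))             # per-sample time embedding
#   params = block.init(jax.random.PRNGKey(0), x, temb)
#   y = block.apply(params, x, temb)       # -> shape (1, 8, 8, 64)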
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(
        self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False,
    ):
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
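# A few illustrative cases (added):
#   is_balanced("([]{})")  -> True   openers are closed in LIFO order
#   is_balanced("([)]")    -> False  ")" arrives while "[" is on top of the stack
#   is_balanced("((")      -> False  unmatched openers remain on the stack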
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 637_8137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute the central angle between the two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
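# Worked note (added; assumes the haversine_distance helper returns metres):
# the flattening above is (AXIS_A - AXIS_B) / AXIS_A ~ 1/298, so for points a
# few hundred kilometres apart the correction terms x_value and y_value are
# tiny and the result stays within a fraction of a percent of the spherical
# haversine distance while still accounting for the Earth's oblateness.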
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        # Gradients below follow from the chain rule applied to the squared
        # error; each weight matrix is updated with its error contribution.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
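# Quick checks of the closed form C(2n, n) (added):
#   solution(1)  -> 2              (the two paths RD and DR through a 1x1 grid)
#   solution(2)  -> 6
#   solution(20) -> 137846528820   (the published Project Euler 15 answer)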
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turnaround time of each process."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Tracks the finished processes:
    # 0 means the process has not run to completion yet, 1 means it has.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Advance to the first process that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
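# Background note (added): HRRN always runs the ready process with the highest
# response ratio (waiting_time + burst_time) / burst_time -- the `temp`
# expression above -- which favours short jobs while ageing long ones so they
# cannot starve. Then turnaround = completion - arrival and
# waiting = turnaround - burst, as computed by the two functions.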
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
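# Worked example (added): 2 moles of an ideal gas at 300 K in a 10 L vessel
# exert P = nRT/V = (2 * 0.0821 * 300) / 10 = 4.926 atm, so
# moles_to_pressure(volume=10, moles=2, temperature=300) returns 5 after rounding.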
if __name__ == "__main__":
import doctest
doctest.testmod()
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation of GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX, as
    the original implementation uses Python variables and control flow.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
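# Illustrative checks (added): custom_unfold(torch.arange(6), 0, 2, 2) yields
# the windows [[0, 1], [2, 3], [4, 5]], matching torch.Tensor.unfold; and
# custom_get_block_length_and_num_blocks(6, 4) returns (3, 2), because 3 is the
# largest divisor of 6 that is smaller than window_size=4 and 6 // 3 == 2.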
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self : Any , __snake_case : int , __snake_case : Optional[Any]=13 , __snake_case : int=7 , __snake_case : Dict=True , __snake_case : str=True , __snake_case : List[str]=True , __snake_case : int=True , __snake_case : str=99 , __snake_case : Dict=24 , __snake_case : int=2 , __snake_case : Dict=6 , __snake_case : str=37 , __snake_case : str="gelu" , __snake_case : List[str]=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Optional[int]=512 , __snake_case : Any=16 , __snake_case : Optional[int]=2 , __snake_case : List[Any]=0.02 , __snake_case : str=3 , __snake_case : List[Any]=None , __snake_case : Any=1000 , ) -> str:
_a : Dict = parent
_a : Tuple = batch_size
_a : Optional[int] = seq_length
_a : Optional[int] = is_training
_a : Dict = use_input_mask
_a : Optional[Any] = use_token_type_ids
_a : List[Any] = use_labels
_a : List[str] = vocab_size
_a : int = hidden_size
_a : List[str] = num_hidden_layers
_a : Optional[Any] = num_attention_heads
_a : Dict = intermediate_size
_a : str = hidden_act
_a : str = hidden_dropout_prob
_a : Union[str, Any] = attention_probs_dropout_prob
_a : Tuple = max_position_embeddings
_a : List[str] = type_vocab_size
_a : List[Any] = type_sequence_label_size
_a : Optional[int] = initializer_range
_a : str = num_labels
_a : int = scope
_a : Tuple = range_bbox
def snake_case_ ( self : Any ) -> Any:
_a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_a : Any = bbox[i, j, 3]
_a : Any = bbox[i, j, 1]
_a : Optional[int] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_a : int = bbox[i, j, 2]
_a : str = bbox[i, j, 0]
_a : List[Any] = t
_a : Any = None
if self.use_input_mask:
_a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_a : Optional[Any] = None
if self.use_token_type_ids:
_a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Any = None
_a : Union[str, Any] = None
if self.use_labels:
_a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case_ ( self : str ) -> List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def snake_case_ ( self : int , __snake_case : List[str] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Any , __snake_case : int , ) -> Any:
_a : Union[str, Any] = LiltModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : Union[str, Any] = model(__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
_a : List[Any] = model(__snake_case , bbox=__snake_case , token_type_ids=__snake_case )
_a : List[Any] = model(__snake_case , bbox=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self : Dict , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : int , ) -> Tuple:
_a : List[str] = self.num_labels
_a : Optional[Any] = LiltForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : Optional[Any] = model(
__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self : Tuple , __snake_case : int , __snake_case : int , __snake_case : List[str] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Optional[int] , ) -> Optional[int]:
_a : List[str] = LiltForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : int = model(
__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self : Any ) -> Optional[int]:
_a : List[Any] = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) : Optional[Any] = config_and_inputs
_a : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase : Optional[Any] = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase : Any = False
UpperCAmelCase : Optional[Any] = False
def snake_case_ ( self : Optional[int] , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Dict , __snake_case : Optional[int] ) -> List[str]:
return True
def snake_case_ ( self : int ) -> Dict:
_a : Union[str, Any] = LiltModelTester(self )
_a : List[str] = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def snake_case_ ( self : Dict ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def snake_case_ ( self : str ) -> Tuple:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : str ) -> str:
_a : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a : Any = type
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : Optional[Any] ) -> List[Any]:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
def snake_case_ ( self : List[Any] ) -> List[Any]:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
@slow
def snake_case_ ( self : Dict ) -> Optional[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : List[str] = LiltModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
@slow
class lowerCamelCase ( unittest.TestCase ):
def snake_case_ ( self : Optional[Any] ) -> str:
_a : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__snake_case )
_a : List[str] = torch.tensor([[1, 2]] , device=__snake_case )
_a : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__snake_case )
# forward pass
with torch.no_grad():
_a : List[Any] = model(input_ids=__snake_case , bbox=__snake_case )
_a : Optional[Any] = torch.Size([1, 2, 768] )
_a : Optional[Any] = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__snake_case , )
        self.assertEqual(outputs.last_hidden_state.shape , __snake_case )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __snake_case , atol=1E-3 ) )
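        # Note (added comment): the expected slice above is specific to the
        # SCUT-DLVCLab/lilt-roberta-en-base checkpoint; a different revision of
        # the weights would require regenerating these reference values.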
| 249 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class _snake_case ( SchedulerMixin , ConfigMixin ):
    order = 1
    @register_to_config
    def __init__( self ,num_train_timesteps=2_000 ,beta_min=0.1 ,beta_max=20 ,sampling_eps=1E-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self ,num_inference_steps ,device = None ):
        self.timesteps = torch.linspace(1 ,self.config.sampling_eps ,num_inference_steps ,device=device )
    def step_pred( self ,score ,x ,t ,generator=None ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape ,layout=x.layout ,generator=generator ,device=x.device ,dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
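    # Rough sampling-loop sketch (added comment; the model call is an assumption):
    #
    #     scheduler.set_timesteps(num_inference_steps)
    #     for t in scheduler.timesteps:
    #         score = model(x, t).sample
    #         x, x_mean = scheduler.step_pred(score, x, t, generator=generator)
    #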
def __len__( self ) -> List[Any]:
        return self.config.num_train_timesteps
| 241 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType ( ExplicitEnum ):
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'
__UpperCAmelCase : Optional[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _snake_case ( ProcessorMixin ):
    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'
def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,**UpperCamelCase ) -> Any:
snake_case__ :Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." ,UpperCamelCase ,)
snake_case__ :int = kwargs.pop("feature_extractor" )
snake_case__ :int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
snake_case__ :Union[str, Any] = tokenizer
snake_case__ :Any = AutoTokenizer.from_pretrained("gpt2" )
snake_case__ :str = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(UpperCamelCase ,UpperCamelCase )
def __call__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,**UpperCamelCase ) -> Dict:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
snake_case__ :Tuple = self.image_processor(UpperCamelCase ,return_tensors=UpperCamelCase ,**UpperCamelCase )
if text is not None:
snake_case__ :Optional[Any] = self.char_tokenizer(UpperCamelCase ,return_tensors=UpperCamelCase ,**UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ :str = encodings["input_ids"]
return inputs
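    # Rough usage sketch (added comment; the checkpoint name is an assumption):
    #
    #     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    #     pixel_values = processor(images=image, return_tensors="pt").pixel_values
    #     outputs = model(pixel_values)
    #     text = processor.batch_decode(outputs.logits)["generated_text"]
    #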
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[str]:
snake_case__ , snake_case__ , snake_case__ :int = sequences
snake_case__ :int = char_preds.size(0 )
snake_case__ , snake_case__ :Dict = self._decode_helper(UpperCamelCase ,"char" )
snake_case__ , snake_case__ :List[str] = self._decode_helper(UpperCamelCase ,"bpe" )
snake_case__ , snake_case__ :List[Any] = self._decode_helper(UpperCamelCase ,"wp" )
snake_case__ :Any = []
snake_case__ :Optional[int] = []
for i in range(UpperCamelCase ):
snake_case__ :Any = [char_scores[i], bpe_scores[i], wp_scores[i]]
snake_case__ :Tuple = [char_strs[i], bpe_strs[i], wp_strs[i]]
snake_case__ :Dict = scores.index(max(UpperCamelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
snake_case__ :Optional[int] = {}
snake_case__ :List[str] = final_strs
snake_case__ :Union[str, Any] = final_scores
snake_case__ :Union[str, Any] = char_strs
snake_case__ :Optional[Any] = bpe_strs
snake_case__ :Union[str, Any] = wp_strs
return out
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
if format == DecodeType.CHARACTER:
snake_case__ :Dict = self.char_decode
snake_case__ :Dict = 1
snake_case__ :List[Any] = "[s]"
elif format == DecodeType.BPE:
snake_case__ :Optional[Any] = self.bpe_decode
snake_case__ :Optional[Any] = 2
snake_case__ :Dict = "#"
elif format == DecodeType.WORDPIECE:
snake_case__ :int = self.wp_decode
snake_case__ :Union[str, Any] = 102
snake_case__ :Optional[int] = "[SEP]"
else:
raise ValueError(f'Format {format} is not supported.' )
snake_case__ , snake_case__ :List[str] = [], []
snake_case__ :List[str] = pred_logits.size(0 )
snake_case__ :Optional[int] = pred_logits.size(1 )
snake_case__ , snake_case__ :Dict = pred_logits.topk(1 ,dim=-1 ,largest=UpperCamelCase ,sorted=UpperCamelCase )
snake_case__ :Optional[Any] = preds_index.view(-1 ,UpperCamelCase )[:, 1:]
snake_case__ :Optional[int] = decoder(UpperCamelCase )
snake_case__ , snake_case__ :List[Any] = torch.nn.functional.softmax(UpperCamelCase ,dim=2 ).max(dim=2 )
snake_case__ :str = preds_max_prob[:, 1:]
for index in range(UpperCamelCase ):
snake_case__ :Optional[Any] = preds_str[index].find(UpperCamelCase )
snake_case__ :Tuple = preds_str[index][:pred_eos]
snake_case__ :Dict = preds_index[index].cpu().tolist()
snake_case__ :Dict = pred_index.index(UpperCamelCase ) if eos_token in pred_index else -1
snake_case__ :str = preds_max_prob[index][: pred_eos_index + 1]
snake_case__ :Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(UpperCamelCase )
conf_scores.append(UpperCamelCase )
return dec_strs, conf_scores
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
snake_case__ :str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase )]
return decode_strs
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]:
return self.bpe_tokenizer.batch_decode(UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Dict:
snake_case__ :Optional[int] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase )]
        return decode_strs
| 241 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , __magic_name__ : int , __magic_name__ : Optional[int]=7 , __magic_name__ : Union[str, Any]=3 , __magic_name__ : str=30 , __magic_name__ : Optional[int]=400 , __magic_name__ : str=True , __magic_name__ : Optional[Any]=None , __magic_name__ : List[Any]=True , __magic_name__ : Any=1 / 255 , __magic_name__ : List[Any]=True , __magic_name__ : Tuple=[0.5, 0.5, 0.5] , __magic_name__ : Any=[0.5, 0.5, 0.5] , __magic_name__ : str=True , ) -> Dict:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = min_resolution
SCREAMING_SNAKE_CASE_ = max_resolution
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean
SCREAMING_SNAKE_CASE_ = image_std
SCREAMING_SNAKE_CASE_ = do_pad
def __A ( self : List[Any] ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __A ( self : Tuple , __magic_name__ : Tuple , __magic_name__ : Optional[Any]=False ) -> Dict:
if not batched:
SCREAMING_SNAKE_CASE_ = image_inputs[0]
if isinstance(__magic_name__ , Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_ = int(self.size["shortest_edge"] * h / w )
SCREAMING_SNAKE_CASE_ = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE_ = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_ = int(self.size["shortest_edge"] * w / h )
else:
SCREAMING_SNAKE_CASE_ = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_ = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE_ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ = max(__magic_name__ , key=lambda __magic_name__ : item[0] )[0]
SCREAMING_SNAKE_CASE_ = max(__magic_name__ , key=lambda __magic_name__ : item[1] )[1]
return expected_height, expected_width
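# Worked example (added comment): with size = {"shortest_edge": 18}, a 60x30
# (w x h) input keeps its aspect ratio, so the helper above expects an output
# of height 18 and width 18 * 60 / 30 = 36.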
@require_torch
@require_vision
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = DetrImageProcessor if is_vision_available() else None
def __A ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ = DetrImageProcessingTester(self )
@property
def __A ( self : Any ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , "image_mean" ) )
self.assertTrue(hasattr(__magic_name__ , "image_std" ) )
self.assertTrue(hasattr(__magic_name__ , "do_normalize" ) )
self.assertTrue(hasattr(__magic_name__ , "do_rescale" ) )
self.assertTrue(hasattr(__magic_name__ , "rescale_factor" ) )
self.assertTrue(hasattr(__magic_name__ , "do_resize" ) )
self.assertTrue(hasattr(__magic_name__ , "size" ) )
self.assertTrue(hasattr(__magic_name__ , "do_pad" ) )
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__magic_name__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , __magic_name__ )
def __A ( self : Optional[Any] ) -> Tuple:
pass
def __A ( self : Any ) -> List[Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
SCREAMING_SNAKE_CASE_ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self : Any ) -> Tuple:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self : Tuple ) -> Union[str, Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __A ( self : Union[str, Any] ) -> Optional[Any]:
# prepare image and target
SCREAMING_SNAKE_CASE_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE_ = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ = {"image_id": 39_769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
SCREAMING_SNAKE_CASE_ = image_processing(images=__magic_name__ , annotations=__magic_name__ , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE_ = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __magic_name__ , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __magic_name__ ) )
# verify boxes
SCREAMING_SNAKE_CASE_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __magic_name__ , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __magic_name__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __magic_name__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __magic_name__ ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __magic_name__ ) )
# verify size
SCREAMING_SNAKE_CASE_ = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __magic_name__ ) )
@slow
def __A ( self : Tuple ) -> Tuple:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE_ = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
SCREAMING_SNAKE_CASE_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
SCREAMING_SNAKE_CASE_ = image_processing(images=__magic_name__ , annotations=__magic_name__ , masks_path=__magic_name__ , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE_ = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __magic_name__ , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __magic_name__ ) )
# verify boxes
SCREAMING_SNAKE_CASE_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __magic_name__ , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __magic_name__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __magic_name__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __magic_name__ ) )
# verify masks
SCREAMING_SNAKE_CASE_ = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __magic_name__ )
# verify orig_size
SCREAMING_SNAKE_CASE_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __magic_name__ ) )
# verify size
SCREAMING_SNAKE_CASE_ = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __magic_name__ ) )
| 356 |
from __future__ import annotations
def a__ ( matrix ):
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
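# Worked example (added comment): for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the
# cheapest top-left to bottom-right path is 1 + 3 + 1 + 1 + 1 = 7, so the
# function above returns 7 (note it mutates the input matrix in place).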
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
    }
}
class _lowerCAmelCase ( PreTrainedTokenizer ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<sep>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<cls>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=["<eop>", "<eod>"] , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
snake_case__ : int =AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
snake_case__ : Dict ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
snake_case__ : Optional[Any] =3
snake_case__ : Union[str, Any] =do_lower_case
snake_case__ : Optional[Any] =remove_space
snake_case__ : Optional[Any] =keep_accents
snake_case__ : str =vocab_file
snake_case__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
snake_case__ : Any =jieba
snake_case__ : List[str] =str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.sp_model )
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[int] ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[Any] =self.__dict__.copy()
snake_case__ : Tuple =None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
snake_case__ : List[Any] =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : int ={}
snake_case__ : Optional[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
if self.remove_space:
snake_case__ : Union[str, Any] =''' '''.join(inputs.strip().split() )
else:
snake_case__ : Any =inputs
snake_case__ : str =outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
snake_case__ : Union[str, Any] =unicodedata.normalize('''NFKD''' , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict =''''''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
snake_case__ : List[str] =outputs.lower()
return outputs
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
snake_case__ : List[str] =self.preprocess_text(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] =self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] =[]
for piece in pieces:
if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
snake_case__ : Union[str, Any] =self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case__ : str =cur_pieces[1:]
else:
snake_case__ : str =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__SCREAMING_SNAKE_CASE )
else:
new_pieces.append(__SCREAMING_SNAKE_CASE )
return new_pieces
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
snake_case__ : List[str] =''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
snake_case__ : Optional[Any] =[self.sep_token_id]
snake_case__ : Union[str, Any] =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1]
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1]
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
snake_case__ : Union[str, Any] =[self.sep_token_id]
snake_case__ : List[Any] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Any =os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
snake_case__ : Union[str, Any] =self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def UpperCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[int] =super()._decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict =text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
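    # Note (added comment): jieba-segmented text stores spaces as \u2582 and
    # newlines as \u2583 (see the translation table built in __init__); the
    # decode helper above reverses that mapping so decoded text reads naturally.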
| 381 |
def alternative_string_arrange ( first_str : str , second_str : str ):
    """simple docstring"""
    first_str_length : int = len(first_str )
    second_str_length : int = len(second_str )
    # iterate up to the length of the longer string
    abs_length : int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list : list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 381 | 1 |
'''simple docstring'''
def solution ( n : int = 2_000_000 ):
    """simple docstring"""
    # sieve of Eratosthenes: 0 marks "still prime", 1 marks "composite"
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
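# Example (added comment): solution(10) sums the primes below 10
# (2 + 3 + 5 + 7) and returns 17; the default of 2_000_000 reproduces
# Project Euler problem 10.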
if __name__ == "__main__":
print(F"""{solution() = }""")
| 715 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method ( coefficient_matrix : NDArray[float64] , constant_matrix : NDArray[float64] , init_val : list[int] , iterations : int , ):
    """simple docstring"""
    rowsa , colsa = coefficient_matrix.shape
    rowsb , colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
        raise ValueError(msg )
    if colsb != 1:
        msg = f"""Constant matrix must be nx1 but received {rowsb}x{colsb}"""
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f"""received {rowsa}x{colsa} and {rowsb}x{colsb}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f"""matrix but received {len(init_val )} and {rowsa}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )
    table : NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant ( table : NDArray[float64] ):
    """simple docstring"""
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
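# Worked example (added comment): for A = [[4, 1, 1], [1, 5, 2], [1, 2, 4]],
# b = [2, -6, -4] and x0 = [0.5, -0.5, -0.5], one Jacobi update gives
# x1[0] = (2 - 1*(-0.5) - 1*(-0.5)) / 4 = 0.75, and similarly for the other
# components; repeated iterations converge because A is strictly diagonally
# dominant.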
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType ( enum.Enum ):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowerCAmelCase_ ( Pipeline ):
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self : Optional[Any] , *__A : List[str] , **__A : List[Any] ) ->List[Any]:
"""simple docstring"""
super().__init__(*__A , **__A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
a__ :Optional[int] = None
if self.model.config.prefix is not None:
a__ :Tuple = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
a__ :Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
a__ , a__ , a__ :int = self._sanitize_parameters(prefix=__A , **self._forward_params )
a__ :Dict = {**self._preprocess_params, **preprocess_params}
a__ :Any = {**self._forward_params, **forward_params}
def _snake_case ( self : Optional[int] , __A : List[str]=None , __A : int=None , __A : Any=None , __A : Optional[Any]=None , __A : Tuple=None , __A : int=None , __A : Any=None , __A : Any=None , **__A : List[Any] , ) ->Tuple:
"""simple docstring"""
a__ :List[str] = {}
if prefix is not None:
a__ :Optional[Any] = prefix
if prefix:
a__ :List[str] = self.tokenizer(
__A , padding=__A , add_special_tokens=__A , return_tensors=self.framework )
a__ :Any = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
a__ :Dict = handle_long_generation
preprocess_params.update(__A )
a__ :List[Any] = generate_kwargs
a__ :List[str] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
a__ :Any = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
a__ :Optional[Any] = ReturnType.TENSORS
if return_type is not None:
a__ :Union[str, Any] = return_type
if clean_up_tokenization_spaces is not None:
a__ :Optional[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
a__ :Optional[int] = self.tokenizer.encode(__A , add_special_tokens=__A )
if len(__A ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
a__ :Union[str, Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _snake_case ( self : Dict , *__A : Tuple , **__A : str ) ->int:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*__A , **__A )
def __call__( self : Union[str, Any] , __A : Optional[Any] , **__A : Any ) ->Union[str, Any]:
"""simple docstring"""
return super().__call__(__A , **__A )
def _snake_case ( self : List[Any] , __A : Tuple , __A : int="" , __A : Union[str, Any]=None , **__A : Optional[Any] ) ->Optional[int]:
"""simple docstring"""
a__ :Tuple = self.tokenizer(
prefix + prompt_text , padding=__A , add_special_tokens=__A , return_tensors=self.framework )
a__ :str = prompt_text
if handle_long_generation == "hole":
a__ :List[str] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
a__ :Any = generate_kwargs["max_new_tokens"]
else:
a__ :Union[str, Any] = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
a__ :Optional[int] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
a__ :Dict = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
a__ :List[Any] = inputs["attention_mask"][:, -keep_length:]
return inputs
def _snake_case ( self : Union[str, Any] , __A : List[Any] , **__A : Union[str, Any] ) ->Dict:
"""simple docstring"""
a__ :Any = model_inputs["input_ids"]
a__ :List[Any] = model_inputs.get("attention_mask" , __A )
# Allow empty prompts
if input_ids.shape[1] == 0:
a__ :Union[str, Any] = None
a__ :Union[str, Any] = None
a__ :str = 1
else:
a__ :Optional[Any] = input_ids.shape[0]
a__ :Tuple = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
a__ :Optional[Any] = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
a__ :Any = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
a__ :str = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
a__ :List[str] = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
a__ :Optional[int] = self.model.generate(input_ids=__A , attention_mask=__A , **__A )
a__ :List[str] = generated_sequence.shape[0]
if self.framework == "pt":
a__ :Union[str, Any] = generated_sequence.reshape(__A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
a__ :int = tf.reshape(__A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def _snake_case ( self : List[Any] , __A : List[str] , __A : str=ReturnType.FULL_TEXT , __A : Any=True ) ->List[str]:
"""simple docstring"""
a__ :Optional[Any] = model_outputs["generated_sequence"][0]
a__ :List[Any] = model_outputs["input_ids"]
a__ :Optional[Any] = model_outputs["prompt_text"]
a__ :int = generated_sequence.numpy().tolist()
a__ :Optional[Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
a__ :Optional[Any] = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
a__ :List[Any] = self.tokenizer.decode(
__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
a__ :Any = 0
else:
a__ :Tuple = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__A , clean_up_tokenization_spaces=__A , ) )
if return_type == ReturnType.FULL_TEXT:
a__ :Optional[int] = prompt_text + text[prompt_length:]
else:
a__ :Optional[int] = text[prompt_length:]
a__ :str = {"generated_text": all_text}
records.append(__A )
return records
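# Minimal usage sketch (added comment; the checkpoint name is an assumption):
#
#     from transformers import pipeline
#     generator = pipeline("text-generation", model="gpt2")
#     generator("Hello, I'm a language model,", max_new_tokens=20)
#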
| 395 |
import math
def jump_search ( arr : list , x : int ) -> int:
    """simple docstring"""
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    # jump ahead block by block until the block that may contain x
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    # linear scan inside the identified block
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
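# Example (added comment): jump_search([0, 1, 2, 4, 8, 16], 8) jumps in blocks
# of floor(sqrt(6)) = 2, lands in the block containing 8, and returns index 4.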
if __name__ == "__main__":
snake_case__ = input('''Enter numbers separated by a comma:\n''').strip()
snake_case__ = [int(item) for item in user_input.split(''',''')]
snake_case__ = int(input('''Enter the number to be searched:\n'''))
snake_case__ = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'''Number {x} is at index {res}''')
| 395 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCamelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'unispeech-sat'
def __init__( self , snake_case=32 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case=0.1 , snake_case=0.02 , snake_case=1E-5 , snake_case="group" , snake_case="gelu" , snake_case=(512, 512, 512, 512, 512, 512, 512) , snake_case=(5, 2, 2, 2, 2, 2, 2) , snake_case=(10, 3, 3, 3, 3, 2, 2) , snake_case=False , snake_case=128 , snake_case=16 , snake_case=False , snake_case=True , snake_case=0.05 , snake_case=10 , snake_case=2 , snake_case=0.0 , snake_case=10 , snake_case=0 , snake_case=320 , snake_case=2 , snake_case=0.1 , snake_case=100 , snake_case=256 , snake_case=256 , snake_case=0.1 , snake_case="mean" , snake_case=False , snake_case=False , snake_case=256 , snake_case=(512, 512, 512, 512, 1500) , snake_case=(5, 3, 3, 1, 1) , snake_case=(1, 2, 3, 1, 1) , snake_case=512 , snake_case=0 , snake_case=1 , snake_case=2 , snake_case=504 , **snake_case , ):
'''simple docstring'''
super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = feat_extract_norm
UpperCamelCase__ = feat_extract_activation
UpperCamelCase__ = list(snake_case )
UpperCamelCase__ = list(snake_case )
UpperCamelCase__ = list(snake_case )
UpperCamelCase__ = conv_bias
UpperCamelCase__ = num_conv_pos_embeddings
UpperCamelCase__ = num_conv_pos_embedding_groups
UpperCamelCase__ = len(self.conv_dim )
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = feat_proj_dropout
UpperCamelCase__ = final_dropout
UpperCamelCase__ = layerdrop
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = initializer_range
UpperCamelCase__ = vocab_size
UpperCamelCase__ = num_clusters
UpperCamelCase__ = do_stable_layer_norm
UpperCamelCase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ = apply_spec_augment
UpperCamelCase__ = mask_time_prob
UpperCamelCase__ = mask_time_length
UpperCamelCase__ = mask_time_min_masks
UpperCamelCase__ = mask_feature_prob
UpperCamelCase__ = mask_feature_length
UpperCamelCase__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCamelCase__ = num_codevectors_per_group
UpperCamelCase__ = num_codevector_groups
UpperCamelCase__ = contrastive_logits_temperature
UpperCamelCase__ = feat_quantizer_dropout
UpperCamelCase__ = num_negatives
UpperCamelCase__ = codevector_dim
UpperCamelCase__ = proj_codevector_dim
UpperCamelCase__ = diversity_loss_weight
# ctc loss
UpperCamelCase__ = ctc_loss_reduction
UpperCamelCase__ = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase__ = list(snake_case )
UpperCamelCase__ = list(snake_case )
UpperCamelCase__ = list(snake_case )
UpperCamelCase__ = xvector_output_dim
@property
def snake_case__ ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
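    # Note (added comment): with the default conv_stride of (5, 2, 2, 2, 2, 2, 2)
    # the property above evaluates to 5 * 2**6 = 320, i.e. one output frame per
    # 320 input samples (20 ms of audio at 16 kHz).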
| 185 |
def combination_util( arr , n , r , index , data , i ):
    if index == r:
        for j in range(r ):
            print(data[j] , end=" " )
        print(" " )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination( arr , n , r ):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
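# Example (added comment): print_combination([10, 20, 30, 40, 50], 5, 3) prints
# all C(5, 3) = 10 combinations, starting with "10 20 30" and ending with
# "30 40 50".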
if __name__ == "__main__":
# Driver code to check the function above
__UpperCamelCase = [1_0, 2_0, 3_0, 4_0, 5_0]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 185 | 1 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset( dataset , expected_features ):
    '''simple docstring'''
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
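# A minimal writer/reader round trip in the style of the tests above (a sketch,
# assuming a `dataset` fixture with the col_1/col_2/col_3 schema used here):
def _example_parquet_round_trip(dataset, tmp_path):
    ParquetDatasetWriter(dataset, tmp_path / "example.parquet").write()
    reloaded = ParquetDatasetReader(str(tmp_path / "example.parquet")).read()
    assert reloaded.column_names == dataset.column_names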
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 46 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **__lowerCAmelCase):
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
    def test_added_tokens_do_lower_case(self):
pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
    def test_maximum_encoding_length_pair_input(self):
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
    def test_pretokenized_inputs(self):
        pass
| 46 | 1 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
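# The index written by shard_on_the_fly follows the standard sharded-checkpoint
# layout, so it can be consumed like any index file (a minimal sketch):
def load_weight_map(dump_path):
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), encoding="utf-8") as f:
        index = json.load(f)
    return index["weight_map"]  # parameter name -> shard file that stores it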
| 710 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Transformer-XL has no hard sequence-length limit.
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 159 | 0 |
import math


def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string form."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")
if __name__ == "__main__":
main()
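    # Cross-check against the built-in formatter (a minimal sketch):
    assert decimal_to_octal(65) == oct(65)  # both yield "0o101"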
| 377 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
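# Wiring sketch (names illustrative): the two extra hooks let evaluate()/predict()
# turn raw start/end logits into text answers before metrics are computed.
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,  # e.g. SQuAD-style span extraction
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()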
| 377 | 1 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def _UpperCamelCase ( _a : "PIL.Image.Image" ):
"""simple docstring"""
__UpperCamelCase : List[str] = BytesIO()
if image.format in list_image_compression_formats():
__UpperCamelCase : Optional[Any] = image.format
else:
__UpperCamelCase : List[str] = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(_a , format=_a )
return buffer.getvalue()
def _UpperCamelCase ( _a : "PIL.Image.Image" ):
"""simple docstring"""
if hasattr(_a , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_a )}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
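# End-to-end sketch of the encode/decode pair above (requires Pillow; the
# array content is arbitrary):
def _example_image_round_trip():
    feature = Image()
    encoded = feature.encode_example(np.zeros((4, 4, 3), dtype="|u1"))  # -> {"bytes": ..., "path": None}
    decoded = feature.decode_example(encoded)  # -> PIL.Image.Image
    return decoded.size  # (4, 4)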
| 287 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a= {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a= [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
a= _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 287 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
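# Usage sketch (the checkpoint downloads on first use; language names must be
# keys of LANGUAGE_CODES above):
#
#   translator = TranslationTool()
#   translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")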
| 281 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]
def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""", [str, list, dict] )
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""", [str, list, dict] )
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""", ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""", ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
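# Outside the test suite, the same manager is what dataset scripts receive; a
# minimal sketch of its combined API (the URL is illustrative):
def _example_download_and_extract(url="https://example.com/data.tar.gz"):
    dl_manager = DownloadManager()
    extracted_dir = dl_manager.download_and_extract(url)
    return extracted_dir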
| 281 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
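# Usage sketch: configuring the model as a backbone that exposes two stages
# (BackboneConfigMixin validates the names against `stage_names`).
def _example_resnet_backbone_config():
    config = ResNetConfig(out_features=["stage2", "stage4"])
    assert config.out_features == ["stage2", "stage4"]
    return config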
| 395 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 395 | 1 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
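# Behavioral sketch of the queue above: `put` re-prioritizes an existing item
# instead of inserting a duplicate entry.
def _example_priority_queue():
    pq = PriorityQueue()
    pq.put((0, 0), 5)
    pq.put((1, 1), 3)
    pq.put((0, 0), 1)  # update: (0, 0) now outranks (1, 1)
    return pq.get()    # -> (1, (0, 0))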
def consistent_heuristic(P: TPos, goal: TPos):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(P: TPos, goal: TPos):
    # integer division by the time variable
    return consistent_heuristic(P, goal) // t


def heuristic_1(P: TPos, goal: TPos):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
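# The three heuristics deliberately disagree (a sketch of their values for the
# demo grid's corners; heuristic_2 also depends on the global time counter t):
#   consistent_heuristic((0, 0), (19, 19))  ~ 26.87    (euclidean)
#   heuristic_1((0, 0), (19, 19))           = 38       (manhattan)
#   heuristic_2((0, 0), (19, 19))           = 26 // t  (scaled euclidean)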
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()

    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                        if neighbours not in close_list_inad:
                            for var in range(1, n_heuristic):
                                if key(neighbours, var, goal, g_function) <= W2 * key(
                                    neighbours, 0, goal, g_function
                                ):
                                    open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground() -> list[TPos]:
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    # top_show() returns the queued item itself, i.e. a position tuple
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
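# --- Added sketch (not from the original file): a minimal sanity check for the
# PriorityQueue above, assuming `put` on an already-queued item acts as an
# update-key and `get` pops the smallest priority.
def _demo_priority_queue() -> None:
    q = PriorityQueue()
    q.put((0, 0), 5.0)
    q.put((1, 1), 3.0)
    q.put((0, 0), 1.0)  # re-inserting an existing item updates its priority
    assert q.get() == (1.0, (0, 0))  # smallest priority comes out first
    assert q.top_show() == (1, 1)  # peek at the remaining minimum item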
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic) | 32 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 315 | 0 |
from ..utils import DummyObject, requires_backends
# NOTE: the damaged source lost the class name; MidiProcessor (the diffusers
# dummy object guarded by these exact backends) is assumed here.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 712 |
"""
Project Euler Problem 85: https://projecteuler.net/problem=85

By counting carefully it can be seen that a rectangular grid measuring 3 by 2
contains eighteen rectangles. Although there exists no rectangular grid that
contains exactly two million rectangles, find the area of the grid with the
nearest solution.
"""
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """
    Find the area of the grid which contains as close to `target` rectangles
    as possible.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
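# --- Added cross-check (not part of the original solution): the number of
# axis-aligned sub-rectangles in an m x n grid is T(m) * T(n), where
# T(k) = k * (k + 1) / 2, since a rectangle is fixed by choosing 2 of the
# m + 1 vertical and 2 of the n + 1 horizontal grid lines.
def count_rectangles_brute_force(m: int, n: int) -> int:
    return sum((m - w + 1) * (n - h + 1) for w in range(1, m + 1) for h in range(1, n + 1))


# the 3 x 2 example from the problem statement contains eighteen rectangles
assert count_rectangles_brute_force(3, 2) == (3 * 4 // 2) * (2 * 3 // 2) == 18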
if __name__ == "__main__":
print(f'{solution() = }')
| 568 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 129 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 129 | 1 |
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
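# --- Added usage sketch (not part of the original script); the file paths
# below are placeholders:
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type ddim --extract_ema \
#       --dump_path ./converted-pipeline --to_safetensors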
| 419 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 419 | 1 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
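# --- Added note (not part of the original __init__): schedulers imported above
# can typically be swapped onto a diffusers pipeline via their config, e.g.
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)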
| 99 |
def odd_even_sort(input_list: list) -> list:
    """Sort in place using odd-even transposition: alternately compare-swap all
    even-indexed and all odd-indexed adjacent pairs until no swap occurs."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True

        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
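# --- Added spot check (not from the original file).
assert odd_even_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]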
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 99 | 1 |
"""simple docstring"""
__UpperCamelCase : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355_818,
}
def __UpperCAmelCase ( _snake_case : str, _snake_case : str, _snake_case : float ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
_lowercase = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {", ".join(_snake_case )}"""
)
raise ValueError(_snake_case )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
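# --- Added spot checks (not from the original file) for the converter above.
assert energy_conversion("joule", "kilojoule", 1000) == 1.0
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0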
if __name__ == "__main__":
import doctest
    doctest.testmod() | 227 | """Rod cutting: maximize the revenue obtainable from a rod of length n given per-length prices."""


def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursive solution without memoization."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Dynamic programming, top-down (memoized) solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Dynamic programming, bottom-up (tabulated) solution."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
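# --- Added sketch (not part of the original module): the same top-down
# memoization expressed with functools.lru_cache; the tuple argument is only
# so `prices` stays hashable for the cache.
from functools import lru_cache


def cut_rod_lru(n: int, prices: tuple) -> int:
    @lru_cache(maxsize=None)
    def best(m: int) -> int:
        if m == 0:
            return 0
        return max(prices[i - 1] + best(m - i) for i in range(1, m + 1))

    return best(n)


assert cut_rod_lru(6, (6, 10, 12, 15, 20, 23)) == 36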
if __name__ == "__main__":
main() | 227 | 1 |
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    # NOTE: the damaged source only said "MobileNetVa"; the v1 processor is assumed here.
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 212 |
"""
Create 2nd-order IIR filters with Butterworth design.

Code based on https://webaudio.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html
"""

from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
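# --- Added sketch (not from the original module): exercising the low-pass
# design above; assumes IIRFilter exposes a per-sample `process()` method as
# in the accompanying audio_filters.iir_filter module.
def _demo_lowpass_impulse_response(num_samples: int = 8) -> list:
    filt = make_lowpass(1000, 48000)
    # feed a unit impulse followed by silence and collect the response
    return [filt.process(1.0)] + [filt.process(0.0) for _ in range(num_samples - 1)]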
| 507 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    It runs end-to-end tokenization: punctuation splitting and wordpiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        r"""
        Encode a batch of text or text pairs. This method is similar to the regular __call__ method but has the
        following differences:

            1. Handles an additional num_candidate axis: (batch_size, num_candidates, text).
            2. Always pads the sequences to *max_length*.
            3. *max_length* must be specified in order to stack packs of candidates into a batch.
        """
        # always use a fixed-length padding strategy
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 706 |
def _a ( SCREAMING_SNAKE_CASE_ : int ):
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__lowerCAmelCase = 1
__lowerCAmelCase = 1
while repunit:
__lowerCAmelCase = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
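# Hedged cross-check (added for illustration; not part of the original
# solution): brute-force the least repunit length divisible by a given number.
# R(6) = 111111 = 3 * 7 * 11 * 13 * 37, so the modular helper above and this
# sketch should both give A(7) = 6.
def _repunit_length_bruteforce(divisor: int) -> int:
    repunit, length = 1, 1
    while repunit % divisor:
        repunit = repunit * 10 + 1
        length += 1
    return length

assert _repunit_length_bruteforce(7) == 6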
def _a ( SCREAMING_SNAKE_CASE_ : int = 1_00_00_00 ):
__lowerCAmelCase = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(SCREAMING_SNAKE_CASE_ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
| 552 | 0 |
"""simple docstring"""
from itertools import count
def _lowerCamelCase ( UpperCAmelCase__ = 50 ) -> int:
'''simple docstring'''
a__ = [1] * min_block_length
for n in count(__snake_case ):
fill_count_functions.append(1 )
for block_length in range(__snake_case,n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_00_00_00:
break
return n
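# Hedged re-derivation (added; the original function above keeps the running
# table in `fill_count_functions`): the same fill-count recurrence as a plain
# helper. Project Euler 114 states that a row of 7 units with minimum block
# length 3 admits exactly 17 fillings, which this sketch reproduces.
def _fill_count(min_block_length: int, row_length: int) -> int:
    f = [1] * min_block_length  # rows shorter than the minimum block: empty only
    for n in range(min_block_length, row_length + 1):
        total = f[n - 1]  # last cell left black
        for block in range(min_block_length, n + 1):
            # a red block of `block` cells ends at cell n; the cell before the
            # block, if there is one, must be black
            total += f[n - block - 1] if n - block - 1 >= 0 else 1
        f.append(total)
    return f[row_length]

assert _fill_count(3, 7) == 17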
if __name__ == "__main__":
print(F'''{solution() = }''')
| 232 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : Optional[int] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
_A : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 361 | 0 |
from math import ceil, sqrt
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 1000000 ) -> Optional[int]:
lowercase__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowercase__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowercase__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
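# Hedged brute-force cross-check (added): a square lamina with outer side
# `outer` and hole side `inner` (same parity, inner >= 1) uses
# outer**2 - inner**2 tiles. Project Euler 173 states that at most one hundred
# tiles yield 41 distinct laminae, which this sketch reproduces.
def _count_laminae_bruteforce(limit: int) -> int:
    total = 0
    for outer in range(3, limit):
        for inner in range(outer - 2, 0, -2):
            if outer * outer - inner * inner > limit:
                break  # smaller holes only use more tiles
            total += 1
    return total

assert _count_laminae_bruteforce(100) == 41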
if __name__ == "__main__":
print(f'''{solution() = }''')
| 703 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = 'deit'
def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int:
"""simple docstring"""
super().__init__(**a )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = encoder_stride
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[Any] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> float:
"""simple docstring"""
return 1E-4
| 45 | 0 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[Any] ):
_A = min(__snake_case ) # min() finds the minimum value
_A = max(__snake_case ) # max() finds the maximum value
_A = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
_A = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(__snake_case , __snake_case ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
_A = 0
for count in range(__snake_case ):
while holes[count] > 0:
holes[count] -= 1
_A = count + min_val
i += 1
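# Hedged, self-contained restatement (added): the same pigeonhole idea with
# readable names, since the placeholder identifiers above do not run as-is.
# Runtime is O(n + value_range), so the algorithm only suits small integer
# ranges.
def _pigeonhole_sort(values: list) -> list:
    low, high = min(values), max(values)
    holes = [0] * (high - low + 1)
    for value in values:
        holes[value - low] += 1
    result = []
    for offset, n_hits in enumerate(holes):
        result.extend([low + offset] * n_hits)
    return result

assert _pigeonhole_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]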
def _SCREAMING_SNAKE_CASE ( ):
_A = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(__snake_case )
print('Sorted order is:' , ' '.join(__snake_case ) )
if __name__ == "__main__":
main()
| 107 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def a__ ( lowercase : Tuple, lowercase : List[str], lowercase : Optional[int], lowercase : List[str], lowercase : List[str]=True, lowercase : str="pt" ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = {'''add_prefix_space''': True} if isinstance(lowercase, lowercase ) and not line.startswith(''' ''' ) else {}
_UpperCamelCase = padding_side
return tokenizer(
[line], max_length=lowercase, padding='''max_length''' if pad_to_max_length else None, truncation=lowercase, return_tensors=lowercase, add_special_tokens=lowercase, **lowercase, )
def a__ ( lowercase : str, lowercase : int, lowercase : Tuple=None, ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = input_ids.ne(lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
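# Hedged worked example (added): the column mask above keeps any column that
# contains at least one non-pad token somewhere in the batch, so only columns
# that are padding everywhere get trimmed. With pad_token_id = 0:
_batch = torch.tensor([[5, 6, 0, 0], [7, 8, 9, 0]])
_keep = _batch.ne(0).any(dim=0)  # tensor([True, True, True, False])
assert _batch[:, _keep].tolist() == [[5, 6, 0], [7, 8, 9]]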
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str="train" , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Dict="" , ) -> Any:
'''simple docstring'''
super().__init__()
_UpperCamelCase = Path(lowerCAmelCase__ ).joinpath(type_path + '''.source''' )
_UpperCamelCase = Path(lowerCAmelCase__ ).joinpath(type_path + '''.target''' )
_UpperCamelCase = self.get_char_lens(self.src_file )
_UpperCamelCase = max_source_length
_UpperCamelCase = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
_UpperCamelCase = tokenizer
_UpperCamelCase = prefix
if n_obs is not None:
_UpperCamelCase = self.src_lens[:n_obs]
_UpperCamelCase = src_lang
_UpperCamelCase = tgt_lang
def __len__( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : Optional[Any] , lowerCAmelCase__ : Any ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
_UpperCamelCase = index + 1 # linecache starts at 1
_UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ) , lowerCAmelCase__ ).rstrip('''\n''' )
_UpperCamelCase = linecache.getline(str(self.tgt_file ) , lowerCAmelCase__ ).rstrip('''\n''' )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCAmelCase__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCAmelCase__ ) else self.tokenizer
)
_UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer , lowerCAmelCase__ ) else self.tokenizer
_UpperCamelCase = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_source_length , '''right''' )
_UpperCamelCase = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_target_length , '''right''' )
_UpperCamelCase = source_inputs['''input_ids'''].squeeze()
_UpperCamelCase = target_inputs['''input_ids'''].squeeze()
_UpperCamelCase = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case__ ( lowerCAmelCase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
return [len(lowerCAmelCase__ ) for x in Path(lowerCAmelCase__ ).open().readlines()]
def snake_case__ ( self : Dict , lowerCAmelCase__ : List[str] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
_UpperCamelCase = torch.stack([x['''input_ids'''] for x in batch] )
_UpperCamelCase = torch.stack([x['''attention_mask'''] for x in batch] )
_UpperCamelCase = torch.stack([x['''decoder_input_ids'''] for x in batch] )
_UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__ )
else self.tokenizer.pad_token_id
)
_UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__ )
else self.tokenizer.pad_token_id
)
_UpperCamelCase = trim_batch(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase , _UpperCamelCase = trim_batch(lowerCAmelCase__ , lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
_UpperCamelCase = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
lowercase__ : Union[str, Any] = getLogger(__name__)
def a__ ( lowercase : List[List] ) -> Optional[int]:
"""simple docstring"""
return list(itertools.chain.from_iterable(lowercase ) )
def a__ ( lowercase : str ) -> None:
"""simple docstring"""
_UpperCamelCase = get_git_info()
save_json(lowercase, os.path.join(lowercase, '''git_log.json''' ) )
def a__ ( lowercase : Optional[int], lowercase : List[str], lowercase : str=4, **lowercase : Optional[Any] ) -> Any:
"""simple docstring"""
with open(lowercase, '''w''' ) as f:
json.dump(lowercase, lowercase, indent=lowercase, **lowercase )
def a__ ( lowercase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
with open(lowercase ) as f:
return json.load(lowercase )
def a__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = git.Repo(search_parent_directories=lowercase )
_UpperCamelCase = {
'''repo_id''': str(lowercase ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def a__ ( lowercase : Callable, lowercase : Iterable ) -> List:
"""simple docstring"""
return list(map(lowercase, lowercase ) )
def a__ ( lowercase : List[Any], lowercase : Dict ) -> Optional[int]:
"""simple docstring"""
with open(lowercase, '''wb''' ) as f:
return pickle.dump(lowercase, lowercase )
def a__ ( lowercase : Tuple ) -> Tuple:
"""simple docstring"""
def remove_articles(lowercase : Tuple ):
return re.sub(r'''\b(a|an|the)\b''', ''' ''', lowercase )
def white_space_fix(lowercase : Tuple ):
return " ".join(text.split() )
def remove_punc(lowercase : Optional[int] ):
_UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase ) ) ) )
def a__ ( lowercase : Tuple, lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
_UpperCamelCase = normalize_answer(lowercase ).split()
_UpperCamelCase = normalize_answer(lowercase ).split()
_UpperCamelCase = Counter(lowercase ) & Counter(lowercase )
_UpperCamelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCamelCase = 1.0 * num_same / len(lowercase )
_UpperCamelCase = 1.0 * num_same / len(lowercase )
_UpperCamelCase = (2 * precision * recall) / (precision + recall)
return fa
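# Hedged worked example (added): token-level F1 as computed above. The
# normalizer strips articles, so "the cat sat" vs "a cat sat down" compares
# ["cat", "sat"] with ["cat", "sat", "down"]: precision 2/2, recall 2/3,
# F1 = 2 * 1 * (2/3) / (1 + 2/3) = 0.8.
_pred, _gold = ["cat", "sat"], ["cat", "sat", "down"]
_common = sum((Counter(_pred) & Counter(_gold)).values())  # 2 shared tokens
_precision, _recall = _common / len(_pred), _common / len(_gold)
assert abs(2 * _precision * _recall / (_precision + _recall) - 0.8) < 1e-9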
def a__ ( lowercase : Tuple, lowercase : Any ) -> List[str]:
"""simple docstring"""
return normalize_answer(lowercase ) == normalize_answer(lowercase )
def a__ ( lowercase : List[str], lowercase : List[str] ) -> Dict:
"""simple docstring"""
assert len(lowercase ) == len(lowercase )
_UpperCamelCase = 0
for hypo, pred in zip(lowercase, lowercase ):
em += exact_match_score(lowercase, lowercase )
if len(lowercase ) > 0:
em /= len(lowercase )
return {"em": em}
def a__ ( lowercase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return model_prefix.startswith('''rag''' )
def a__ ( lowercase : int, lowercase : List[Any], lowercase : List[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCamelCase = '''dropout_rate'''
for p in extra_params:
if getattr(lowercase, lowercase, lowercase ):
if not hasattr(lowercase, lowercase ) and not hasattr(lowercase, equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(lowercase ) )
delattr(lowercase, lowercase )
continue
_UpperCamelCase = p if hasattr(lowercase, lowercase ) else equivalent_param[p]
setattr(lowercase, lowercase, getattr(lowercase, lowercase ) )
delattr(lowercase, lowercase )
return hparams, config
| 98 | 0 |
def lowerCAmelCase__(__snake_case ,__snake_case ) -> float:
'''simple docstring'''
return base * power(__snake_case ,(exponent - 1) ) if exponent else 1
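# Hedged restatement (added): the recursion above peels off one multiplication
# per call, e.g. 2**3 = 2 * (2 * (2 * 1)). Written standalone because the
# recursive call refers to the original name `power`:
def _power(base: int, exponent: int) -> float:
    return base * _power(base, exponent - 1) if exponent else 1

assert _power(2, 3) == 8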
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
_a = int(input("Enter the base: ").strip())
_a = int(input("Enter the exponent: ").strip())
_a = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_a = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
| 719 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Any:
'''simple docstring'''
lowerCamelCase__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowerCamelCase__ = {
'''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2],
'''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1],
'''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5],
}
lowerCamelCase__ = F'{src_lang}-{tgt_lang}'
lowerCamelCase__ = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=__snake_case ,exist_ok=__snake_case )
lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' )
print(F'Generating {path}' )
with open(__snake_case ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(__snake_case )
# make sure we are under the root of the project
_a = Path(__file__).resolve().parent.parent.parent
_a = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
_a = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 29 | 0 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class a_ ( _lowerCAmelCase ):
def lowercase__ ( self : int , lowercase : List[Any]=None , lowercase : str=None , lowercase : Dict=None , **lowercase : int ):
"""simple docstring"""
if tokenize_kwargs is None:
lowercase_ :Any = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" )
lowercase_ :int = truncation
lowercase_ :Optional[Any] = tokenize_kwargs
lowercase_ :str = {}
if return_tensors is not None:
lowercase_ :Tuple = return_tensors
return preprocess_params, {}, postprocess_params
def lowercase__ ( self : List[Any] , lowercase : Tuple , **lowercase : Optional[int] ):
"""simple docstring"""
lowercase_ :str = self.framework
lowercase_ :Union[str, Any] = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase )
return model_inputs
def lowercase__ ( self : List[str] , lowercase : int ):
"""simple docstring"""
lowercase_ :int = self.model(**lowercase )
return model_outputs
def lowercase__ ( self : Dict , lowercase : Optional[int] , lowercase : List[Any]=False ):
"""simple docstring"""
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : List[str] , *lowercase : Any , **lowercase : List[Any] ):
"""simple docstring"""
return super().__call__(*lowercase , **lowercase )
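# Hedged usage sketch (added; the model name is only an example and the call
# downloads weights, so it is left commented):
#
#     from transformers import pipeline
#     extractor = pipeline("feature-extraction", model="bert-base-uncased")
#     features = extractor("This is a test.")
#     # -> nested lists shaped [1, num_tokens, hidden_size]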
| 172 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase : int ={
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class a_ ( _lowerCAmelCase ):
__A = "swin2sr"
__A = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[Any] , lowercase : Union[str, Any]=64 , lowercase : Optional[int]=1 , lowercase : List[Any]=3 , lowercase : Tuple=180 , lowercase : Optional[Any]=[6, 6, 6, 6, 6, 6] , lowercase : Optional[Any]=[6, 6, 6, 6, 6, 6] , lowercase : List[Any]=8 , lowercase : Union[str, Any]=2.0 , lowercase : List[Any]=True , lowercase : Optional[int]=0.0 , lowercase : List[Any]=0.0 , lowercase : Optional[int]=0.1 , lowercase : str="gelu" , lowercase : str=False , lowercase : Optional[Any]=0.02 , lowercase : List[Any]=1e-5 , lowercase : int=2 , lowercase : Union[str, Any]=1.0 , lowercase : List[Any]="1conv" , lowercase : Optional[int]="pixelshuffle" , **lowercase : Optional[int] , ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :str = image_size
lowercase_ :int = patch_size
lowercase_ :Tuple = num_channels
lowercase_ :str = embed_dim
lowercase_ :int = depths
lowercase_ :Tuple = len(lowercase )
lowercase_ :Tuple = num_heads
lowercase_ :Any = window_size
lowercase_ :List[str] = mlp_ratio
lowercase_ :int = qkv_bias
lowercase_ :int = hidden_dropout_prob
lowercase_ :Optional[int] = attention_probs_dropout_prob
lowercase_ :int = drop_path_rate
lowercase_ :Tuple = hidden_act
lowercase_ :Tuple = use_absolute_embeddings
lowercase_ :int = layer_norm_eps
lowercase_ :List[Any] = initializer_range
lowercase_ :Tuple = upscale
lowercase_ :Any = img_range
lowercase_ :Optional[Any] = resi_connection
lowercase_ :Optional[int] = upsampler
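# Hedged usage sketch (added; class and argument names follow the public
# transformers API, shown commented since it builds a full model):
#
#     config = Swin2SRConfig(upscale=4)
#     model = Swin2SRForImageSuperResolution(config)  # randomly initialized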
| 172 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__lowerCAmelCase = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "rag"
snake_case__ = True
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Tuple=" / " , __SCREAMING_SNAKE_CASE : Any=" // " , __SCREAMING_SNAKE_CASE : List[str]=5 , __SCREAMING_SNAKE_CASE : Dict=300 , __SCREAMING_SNAKE_CASE : Dict=768 , __SCREAMING_SNAKE_CASE : Any=8 , __SCREAMING_SNAKE_CASE : int="wiki_dpr" , __SCREAMING_SNAKE_CASE : Optional[int]="train" , __SCREAMING_SNAKE_CASE : Union[str, Any]="compressed" , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> List[Any]:
super().__init__(
bos_token_id=__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , forced_eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , vocab_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
a_ : Any = kwargs.pop('''question_encoder''' )
a_ : Dict = question_encoder_config.pop('''model_type''' )
a_ : Optional[int] = kwargs.pop('''generator''' )
a_ : str = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
a_ : List[str] = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Tuple = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : List[str] = reduce_loss
a_ : Any = label_smoothing
a_ : Any = exclude_bos_score
a_ : Union[str, Any] = do_marginalize
a_ : Dict = title_sep
a_ : Union[str, Any] = doc_sep
a_ : Optional[Any] = n_docs
a_ : Tuple = max_combined_length
a_ : Dict = dataset
a_ : List[str] = dataset_split
a_ : int = index_name
a_ : int = retrieval_vector_size
a_ : int = retrieval_batch_size
a_ : int = passages_path
a_ : Optional[int] = index_path
a_ : List[str] = use_dummy_dataset
a_ : Optional[int] = output_retrieved
a_ : Any = do_deduplication
a_ : str = use_cache
if self.forced_eos_token_id is None:
a_ : Optional[Any] = getattr(self.generator , '''forced_eos_token_id''' , __SCREAMING_SNAKE_CASE )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : PretrainedConfig , **__SCREAMING_SNAKE_CASE : List[Any] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
a_ : List[str] = copy.deepcopy(self.__dict__ )
a_ : List[str] = self.question_encoder.to_dict()
a_ : str = self.generator.to_dict()
a_ : Dict = self.__class__.model_type
return output
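# Hedged usage sketch (added; model identifiers are only examples). The
# classmethod above composes a RagConfig from two sub-configs:
#
#     from transformers import AutoConfig, RagConfig
#     question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         question_encoder, generator, n_docs=5
#     )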
| 721 |
'''simple docstring'''
from math import pi, sqrt, tan
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
a_ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _UpperCAmelCase ( __A : float , __A : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _UpperCAmelCase ( __A : float , __A : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(__A , 2 ) * torus_radius * tube_radius
def _UpperCAmelCase ( __A : float , __A : float ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _UpperCAmelCase ( __A : float ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
a_ : int = (sidea + sidea + sidea) / 2
a_ : Optional[Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _UpperCAmelCase ( __A : float , __A : float ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _UpperCAmelCase ( __A : float , __A : float , __A : float ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _UpperCAmelCase ( __A : float ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _UpperCAmelCase ( __A : float , __A : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _UpperCAmelCase ( __A : float , __A : float ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _UpperCAmelCase ( __A : int , __A : float ):
if not isinstance(__A , __A ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
    return (sides * length**2) / (4 * tan(pi / sides ))
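# Hedged sanity check (added): the regular-polygon formula above gives a
# regular hexagon of side 2 the area 6 * 2**2 / (4 * tan(pi / 6)) = 6 * sqrt(3).
assert abs((6 * 2**2) / (4 * tan(pi / 6)) - 6 * sqrt(3)) < 1e-9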
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 666 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ (a__ , unittest.TestCase ):
'''simple docstring'''
_a = DiTPipeline
_a = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_a = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_a = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_a = False
def _lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__a , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=__a , )
lowerCamelCase_ : Any = AutoencoderKL()
lowerCamelCase_ : List[Any] = DDIMScheduler()
lowerCamelCase_ : Tuple = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def _lowerCAmelCase ( self : Any , __a : Optional[int] , __a : List[str]=0 ) ->Optional[int]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase_ : Optional[int] = torch.manual_seed(__a )
else:
lowerCamelCase_ : List[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase_ : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
lowerCamelCase_ : Optional[Any] = """cpu"""
lowerCamelCase_ : Any = self.get_dummy_components()
lowerCamelCase_ : List[str] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase_ : str = self.get_dummy_inputs(__a )
lowerCamelCase_ : int = pipe(**__a ).images
lowerCamelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowerCamelCase_ : int = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def _lowerCAmelCase ( self : str ) ->List[Any]:
self._test_inference_batch_single_identical(relax_max_difference=__a , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _lowerCAmelCase ( self : List[str] ) ->Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE_ (unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self : List[str] ) ->Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : int ) ->Tuple:
lowerCamelCase_ : Tuple = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
lowerCamelCase_ : str = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
lowerCamelCase_ : Tuple = pipe.get_label_ids(__a )
lowerCamelCase_ : str = pipe(__a , generator=__a , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(__a , __a ):
lowerCamelCase_ : List[Any] = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def _lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
lowerCamelCase_ : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
lowerCamelCase_ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
lowerCamelCase_ : List[str] = ["""vase""", """umbrella"""]
lowerCamelCase_ : Any = pipe.get_label_ids(__a )
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(__a , generator=__a , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(__a , __a ):
lowerCamelCase_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 278 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : int , __a : str=13 , __a : Tuple=7 , __a : int=True , __a : int=True , __a : Dict=True , __a : str=True , __a : List[str]=99 , __a : Dict=64 , __a : Optional[Any]=32 , __a : List[Any]=5 , __a : Optional[int]=4 , __a : str=37 , __a : str="gelu" , __a : Optional[Any]=0.1 , __a : Union[str, Any]=0.1 , __a : int=512 , __a : Optional[Any]=16 , __a : Any=2 , __a : Dict=0.02 , __a : str=3 , __a : List[Any]=4 , __a : List[str]=None , ) ->Optional[int]:
lowerCamelCase_ : Dict = parent
lowerCamelCase_ : Optional[Any] = batch_size
lowerCamelCase_ : Any = seq_length
lowerCamelCase_ : Union[str, Any] = is_training
lowerCamelCase_ : int = use_input_mask
lowerCamelCase_ : int = use_token_type_ids
lowerCamelCase_ : int = use_labels
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Any = hidden_size
lowerCamelCase_ : Any = embedding_size
lowerCamelCase_ : int = num_hidden_layers
lowerCamelCase_ : Union[str, Any] = num_attention_heads
lowerCamelCase_ : Optional[Any] = intermediate_size
lowerCamelCase_ : Optional[int] = hidden_act
lowerCamelCase_ : Dict = hidden_dropout_prob
lowerCamelCase_ : Any = attention_probs_dropout_prob
lowerCamelCase_ : Optional[int] = max_position_embeddings
lowerCamelCase_ : Dict = type_vocab_size
lowerCamelCase_ : List[str] = type_sequence_label_size
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Union[str, Any] = num_labels
lowerCamelCase_ : List[Any] = num_choices
lowerCamelCase_ : Dict = scope
def _lowerCAmelCase ( self : int ) ->str:
lowerCamelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : Tuple = None
if self.use_input_mask:
lowerCamelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : List[Any] = None
if self.use_token_type_ids:
lowerCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ : List[Any] = None
lowerCamelCase_ : Tuple = None
lowerCamelCase_ : Optional[int] = None
if self.use_labels:
lowerCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self : Tuple , __a : int , __a : List[str] , __a : Tuple , __a : Any , __a : Union[str, Any] , __a : int , __a : Any ) ->Tuple:
lowerCamelCase_ : Tuple = MobileBertModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Dict = model(__a , attention_mask=__a , token_type_ids=__a )
lowerCamelCase_ : Optional[int] = model(__a , token_type_ids=__a )
lowerCamelCase_ : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCAmelCase ( self : List[str] , __a : str , __a : Union[str, Any] , __a : int , __a : List[Any] , __a : Dict , __a : List[Any] , __a : List[str] ) ->Tuple:
lowerCamelCase_ : List[Any] = MobileBertForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Union[str, Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Tuple , __a : Dict , __a : Tuple , __a : Optional[Any] , __a : Union[str, Any] , __a : Dict ) ->int:
lowerCamelCase_ : Tuple = MobileBertForNextSentencePrediction(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Any = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCAmelCase ( self : str , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Optional[int] , __a : Optional[int] , __a : Optional[int] , __a : Dict ) ->List[Any]:
lowerCamelCase_ : Optional[int] = MobileBertForPreTraining(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : List[Any] = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCAmelCase ( self : List[str] , __a : Tuple , __a : int , __a : Optional[Any] , __a : Optional[Any] , __a : Tuple , __a : Optional[Any] , __a : Any ) ->List[str]:
lowerCamelCase_ : Dict = MobileBertForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Tuple = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[Any] , __a : List[Any] , __a : str , __a : int , __a : Dict , __a : Dict , __a : List[Any] , __a : str ) ->Tuple:
lowerCamelCase_ : Dict = self.num_labels
lowerCamelCase_ : Optional[Any] = MobileBertForSequenceClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase_ : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : Optional[int] , __a : int , __a : str , __a : str , __a : Optional[Any] ) ->Tuple:
lowerCamelCase_ : int = self.num_labels
lowerCamelCase_ : List[str] = MobileBertForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Optional[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : Union[str, Any] , __a : Any , __a : Tuple , __a : Dict , __a : Dict , __a : List[Any] , __a : Optional[int] , __a : Optional[Any] ) ->List[str]:
lowerCamelCase_ : Any = self.num_choices
lowerCamelCase_ : int = MobileBertForMultipleChoice(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Tuple = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : List[str] ) ->int:
lowerCamelCase_ : Optional[Any] = self.prepare_config_and_inputs()
        (
            lowerCamelCase_,
            lowerCamelCase_,
            lowerCamelCase_,
            lowerCamelCase_,
            lowerCamelCase_,
            lowerCamelCase_,
            lowerCamelCase_,
        ) = config_and_inputs
lowerCamelCase_ : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ (a__ , a__ , unittest.TestCase ):
'''simple docstring'''
_a = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_a = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = True
def _lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : Dict , __a : str=False ) ->Any:
lowerCamelCase_ : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class in get_values(__a ):
lowerCamelCase_ : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a )
lowerCamelCase_ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
return inputs_dict
def _lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
lowerCamelCase_ : List[Any] = MobileBertModelTester(self )
lowerCamelCase_ : int = ConfigTester(self , config_class=__a , hidden_size=37 )
def _lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : str ) ->Any:
lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a )
def _lowerCAmelCase ( self : List[str] ) ->Tuple:
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a )
def _lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a )
def _lowerCAmelCase ( self : Any ) ->List[Any]:
lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a )
def _lowerCAmelCase ( self : Optional[int] ) ->str:
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a )
def _lowerCAmelCase ( self : str ) ->Optional[Any]:
lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a )
def _lowerCAmelCase ( self : List[str] ) ->int:
lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a )
def _lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a )
def __lowerCamelCase ( A__ : List[str] ) -> Optional[int]:
return torch.tensor(
A__ , dtype=torch.long , device=A__ , )
snake_case__ : List[str] = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ (unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self : List[Any] ) ->List[str]:
lowerCamelCase_ : Any = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(__a )
lowerCamelCase_ : int = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
with torch.no_grad():
lowerCamelCase_ : Optional[Any] = model(__a )[0]
lowerCamelCase_ : Any = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , __a )
lowerCamelCase_ : str = torch.tensor(
[
[
[-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05],
[-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00],
[2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01],
]
] , device=__a , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
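        # For example, with TOLERANCE = 1e-3 a value near 10e8 may differ by
        # roughly 10e5 in absolute terms and still pass, while a value near
        # 10e0 is held to about 10e-3 -- one relative threshold covers both.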
lowerCamelCase_ : str = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase_ : Tuple = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 278 | 1 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def snake_case_ ( lowercase__ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase =[
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def snake_case_ ( lowercase__ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase =emb.weight.shape
_lowerCAmelCase =nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
_lowerCAmelCase =emb.weight.data
return lin_layer
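# The helper above implements weight tying: the returned Linear layer reuses the embedding
# matrix, so logits come out as hidden_states @ E^T. A minimal illustration of the shape
# contract (the demo function and the sizes are ours, not part of the conversion script):
def _demo_weight_tying():
    emb = nn.Embedding(num_embeddings=32, embedding_dim=8)
    lm_head = nn.Linear(8, 32, bias=False)
    lm_head.weight.data = emb.weight.data  # share the storage, mirroring the helper above
    hidden = torch.randn(1, 8)
    logits = lm_head(hidden)  # one score per vocabulary entry
    assert logits.shape == (1, 32)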
def snake_case_ ( lowercase__ : List[str] ):
'''simple docstring'''
_lowerCAmelCase =torch.load(lowercase__ , map_location="""cpu""" )
_lowerCAmelCase =Namespace(**checkpoint["""cfg"""]["""model"""] )
_lowerCAmelCase =checkpoint['model']
remove_ignore_keys_(lowercase__ )
_lowerCAmelCase =state_dict['decoder.embed_tokens.weight'].shape[0]
_lowerCAmelCase ={key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
_lowerCAmelCase =XGLMConfig(
vocab_size=lowercase__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
_lowerCAmelCase =XGLMForCausalLM(lowercase__ )
_lowerCAmelCase =model.load_state_dict(lowercase__ , strict=lowercase__ )
print(lowercase__ )
_lowerCAmelCase =make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
__SCREAMING_SNAKE_CASE : Tuple = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 711 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : int = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_: str = ReformerTokenizer
a_: Any = ReformerTokenizerFast
a_: Union[str, Any] = True
a_: int = False
a_: List[Any] = True
def lowerCAmelCase__ ( self : str ):
super().setUp()
_lowerCAmelCase =ReformerTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Any ):
_lowerCAmelCase ="""<s>"""
_lowerCAmelCase =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def lowerCAmelCase__ ( self : str ):
_lowerCAmelCase =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(lowerCamelCase_ ) , 1000 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCAmelCase__ ( self : Tuple ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase =self.get_tokenizer()
_lowerCAmelCase =self.get_rust_tokenizer()
_lowerCAmelCase ="""I was born in 92000, and this is falsé."""
_lowerCAmelCase =tokenizer.tokenize(lowerCamelCase_ )
_lowerCAmelCase =rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowerCAmelCase =tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
_lowerCAmelCase =rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowerCAmelCase =self.get_rust_tokenizer()
_lowerCAmelCase =tokenizer.encode(lowerCamelCase_ )
_lowerCAmelCase =rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase_ : Optional[int]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase =self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
# Simple input
_lowerCAmelCase ="""This is a simple input"""
_lowerCAmelCase =["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase =("""This is a simple input""", """This is a pair""")
_lowerCAmelCase =[
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" , )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" , )
def lowerCAmelCase__ ( self : Dict ):
pass
def lowerCAmelCase__ ( self : int ):
_lowerCAmelCase =ReformerTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
_lowerCAmelCase =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [285, 46, 10, 170, 382] , )
_lowerCAmelCase =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_lowerCAmelCase =tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def lowerCAmelCase__ ( self : Tuple ):
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def lowerCAmelCase__ ( self : int ):
_lowerCAmelCase ="""Hello World!"""
_lowerCAmelCase =[126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self : List[Any] ):
_lowerCAmelCase =(
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
_lowerCAmelCase =[
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@require_torch
@slow
def lowerCAmelCase__ ( self : Optional[int] ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_lowerCAmelCase =list(self.big_tokenizer.get_vocab().keys() )[:10]
_lowerCAmelCase =""" """.join(lowerCamelCase_ )
_lowerCAmelCase =self.big_tokenizer.encode_plus(lowerCamelCase_ , return_tensors="""pt""" )
_lowerCAmelCase =self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
_lowerCAmelCase =ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_lowerCAmelCase =encoded_sequence["""input_ids"""].shape
_lowerCAmelCase =ReformerModel(lowerCamelCase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase_ )
model(**lowerCamelCase_ )
@slow
def lowerCAmelCase__ ( self : Dict ):
# fmt: off
_lowerCAmelCase ={"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_lowerCAmelCase =[
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=lowerCamelCase_ , sequences=lowerCamelCase_ , )
| 149 | 0 |
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def lowerCamelCase_ ( decimal : float ) -> str:
    '''simple docstring'''
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal , remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
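# Worked examples, checked by hand: each loop pass peels off the least-significant hex digit via
# divmod(decimal, 16). Note the edge case: 0 skips the loop entirely and comes back as just '0x'.
def _demo_hexadecimal():
    assert lowerCamelCase_(255 ) == '0xff'
    assert lowerCamelCase_(-256 ) == '-0x100'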
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 106 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""transformers""", """torch""", """note_seq"""]
def __init__( self : Dict , *snake_case_ : Any , **snake_case_ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __magic_name__ ( cls : Optional[int] , *snake_case_ : Union[str, Any] , **snake_case_ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __magic_name__ ( cls : List[Any] , *snake_case_ : Any , **snake_case_ : int ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 347 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class snake_case__ ( nn.Module):
'''simple docstring'''
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : jnp.dtype = jnp.floataa
def __lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case :List[str] = []
__snake_case :Union[str, Any] = []
for i in range(self.num_layers ):
__snake_case :List[str] = self.in_channels if i == 0 else self.out_channels
__snake_case :Any = FlaxResnetBlockaD(
in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
__snake_case :Optional[Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
__snake_case :Optional[Any] = resnets
__snake_case :List[Any] = attentions
if self.add_downsample:
__snake_case :Optional[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , a__ , a__ , a__ , a__=True ) -> List[Any]:
'''simple docstring'''
__snake_case :int = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__snake_case :Dict = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
__snake_case :List[Any] = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__snake_case :List[Any] = self.downsamplers_a(lowerCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class snake_case__ ( nn.Module):
'''simple docstring'''
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : jnp.dtype = jnp.floataa
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case :Union[str, Any] = []
for i in range(self.num_layers ):
__snake_case :Union[str, Any] = self.in_channels if i == 0 else self.out_channels
__snake_case :Any = FlaxResnetBlockaD(
in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
__snake_case :Tuple = resnets
if self.add_downsample:
__snake_case :Optional[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , a__ , a__ , a__=True ) -> Union[str, Any]:
'''simple docstring'''
__snake_case :List[Any] = ()
for resnet in self.resnets:
__snake_case :Optional[Any] = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__snake_case :Union[str, Any] = self.downsamplers_a(lowerCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class snake_case__ ( nn.Module):
'''simple docstring'''
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : jnp.dtype = jnp.floataa
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case :Union[str, Any] = []
__snake_case :Optional[Any] = []
for i in range(self.num_layers ):
__snake_case :Optional[int] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__snake_case :Tuple = self.prev_output_channel if i == 0 else self.out_channels
__snake_case :int = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
__snake_case :Dict = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
__snake_case :int = resnets
__snake_case :Any = attentions
if self.add_upsample:
__snake_case :List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , a__ , a__ , a__ , a__ , a__=True ) -> Optional[int]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__snake_case :str = res_hidden_states_tuple[-1]
__snake_case :Optional[Any] = res_hidden_states_tuple[:-1]
__snake_case :List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__snake_case :Dict = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
__snake_case :int = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
if self.add_upsample:
__snake_case :Tuple = self.upsamplers_a(lowerCAmelCase_ )
return hidden_states
class snake_case__ ( nn.Module):
'''simple docstring'''
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : jnp.dtype = jnp.floataa
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
__snake_case :str = []
for i in range(self.num_layers ):
__snake_case :Tuple = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__snake_case :List[Any] = self.prev_output_channel if i == 0 else self.out_channels
__snake_case :Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
__snake_case :Optional[int] = resnets
if self.add_upsample:
__snake_case :Any = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , a__ , a__ , a__ , a__=True ) -> Union[str, Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__snake_case :List[str] = res_hidden_states_tuple[-1]
__snake_case :int = res_hidden_states_tuple[:-1]
__snake_case :int = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__snake_case :Union[str, Any] = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
if self.add_upsample:
__snake_case :Optional[int] = self.upsamplers_a(lowerCAmelCase_ )
return hidden_states
class snake_case__ ( nn.Module):
'''simple docstring'''
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : int = 1
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : jnp.dtype = jnp.floataa
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :Any = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__snake_case :int = []
for _ in range(self.num_layers ):
__snake_case :Dict = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
__snake_case :List[str] = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
__snake_case :Tuple = resnets
__snake_case :Optional[Any] = attentions
def __call__( self , a__ , a__ , a__ , a__=True ) -> List[str]:
'''simple docstring'''
__snake_case :str = self.resnets[0](lowerCAmelCase_ , lowerCAmelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__snake_case :List[Any] = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
__snake_case :Any = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
return hidden_states
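# The up-blocks above consume the stored down-block activations in reverse order ("pop res
# hidden states") and concatenate them with the current features along the channel axis before
# each ResNet. A shape-only sketch of that bookkeeping (the array sizes are illustrative):
def _demo_skip_connections():
    hidden_states = jnp.zeros((1, 8, 8, 64) )  # NHWC layout, as used by these Flax blocks
    res_hidden_states_tuple = (jnp.zeros((1, 8, 8, 32) ),)  # one stored down-block output
    res_hidden_states = res_hidden_states_tuple[-1]  # pop the most recent skip
    res_hidden_states_tuple = res_hidden_states_tuple[:-1]
    merged = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )  # 64 + 32 channels
    assert merged.shape == (1, 8, 8, 96)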
| 717 |
from __future__ import annotations
import time
import numpy as np
lowerCamelCase__ = [8, 5, 9, 7]
lowerCamelCase__ = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowerCamelCase__ = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
'''simple docstring'''
def __init__( self , a__ , a__ , a__ , ) -> None:
'''simple docstring'''
__snake_case :Dict = claim_vector
__snake_case :Optional[int] = allocated_resources_table
__snake_case :Optional[int] = maximum_claim_table
def __lowercase ( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __lowercase ( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __lowercase ( self ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(a__ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __lowercase ( self ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(a__ ): i for i in self.__need()}
def __lowercase ( self , **a__ ) -> None:
'''simple docstring'''
__snake_case :Optional[int] = self.__need()
__snake_case :List[Any] = self.__allocated_resources_table
__snake_case :str = self.__available_resources()
__snake_case :List[Any] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
__snake_case :Dict = False
for each_need in need_list:
__snake_case :Dict = True
for index, need in enumerate(a__ ):
if need > available_resources[index]:
__snake_case :Dict = False
break
if execution:
__snake_case :Any = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__snake_case :List[str] = original_need_index
print(F'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(a__ )
# update available/freed resources stack
__snake_case :Tuple = np.array(a__ ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(a__ ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def __lowercase ( self ) -> Dict:
'''simple docstring'''
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(a__ ) + 1}'''
+ """ """.join(F'''{it:>8}''' for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(a__ ) + 1}'''
+ """ """.join(F'''{it:>8}''' for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(a__ ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(a__ ) for x in self.__available_resources() ) )
time.sleep(1 )
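# A compact restatement of the safety test performed by the class above: a state is safe iff the
# processes can be ordered so that each one's remaining need fits within the currently available
# resources, after which its whole allocation is released. Sketch only; the names are ours.
# With the claim vector and tables defined at the top of this file, this returns True.
def _demo_is_safe(claim_vector, allocated, maximum):
    available = np.array(claim_vector) - np.array(allocated).sum(axis=0)
    need = np.array(maximum) - np.array(allocated)
    pending = list(range(len(allocated)))
    while pending:
        runnable = [p for p in pending if (need[p] <= available).all()]
        if not runnable:
            return False  # nobody can finish with what is left: unsafe state
        finished = runnable[0]
        available = available + np.array(allocated[finished])  # its resources are freed
        pending.remove(finished)
    return True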
if __name__ == "__main__":
import doctest
doctest.testmod()
| 291 | 0 |
"""simple docstring"""
import random
def partition ( a : list , left_index : int , right_index : int ):
    """simple docstring"""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random ( a : list , left : int , right : int ):
    """simple docstring"""
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main ( ):
    """simple docstring"""
    user_input = input("""Enter numbers separated by a comma:\n""" ).strip()
    unsorted = [int(item ) for item in user_input.split(""",""" )]
    quick_sort_random(unsorted , 0 , len(unsorted ) )
    print(unsorted )
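# Quick sanity check of the routines above (the list literal is arbitrary):
def _demo_quick_sort_random():
    data = [3, 1, 4, 1, 5, 9, 2, 6]
    quick_sort_random(data , 0 , len(data ) )
    assert data == [1, 1, 2, 3, 4, 5, 6, 9]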
if __name__ == "__main__":
main()
| 88 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=2 , __UpperCamelCase=24 , __UpperCamelCase=16 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=None , __UpperCamelCase=2 , __UpperCamelCase=2 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = patch_size
snake_case_ = max_length
snake_case_ = num_mel_bins
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = frequency_stride
snake_case_ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
snake_case_ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
snake_case_ = (self.max_length - self.patch_size) // self.time_stride + 1
snake_case_ = frequency_out_dimension * time_out_dimension
snake_case_ = num_patches + 2
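        # Worked example with the defaults above: patch_size=2, max_length=24, num_mel_bins=16 and
        # strides of 2 give frequency_out = (16 - 2) // 2 + 1 = 8 and time_out = (24 - 2) // 2 + 1 = 12,
        # i.e. 8 * 12 = 96 patches and a final sequence length of 96 + 2 = 98.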
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, input_values, labels
def __lowerCAmelCase ( self ):
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = ASTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
        snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'input_values': input_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__A = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = ASTModelTester(self )
snake_case_ = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['input_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ASTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a():
'''simple docstring'''
snake_case_ = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
snake_case_ , snake_case_ = torchaudio.load(lowercase__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.default_feature_extractor
snake_case_ = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(__UpperCamelCase )
snake_case_ = self.default_feature_extractor
snake_case_ , snake_case_ = prepare_audio()
snake_case_ = audio.squeeze().numpy()
snake_case_ = feature_extractor(__UpperCamelCase , sampling_rate=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**__UpperCamelCase )
# verify the logits
snake_case_ = torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case_ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
| 187 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowercase : Optional[int] = open # noqa: we just need to have a builtin inside this module to test it properly
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase : Union[str, Any] = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowercase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 114 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case( metaclass=__a ):
__snake_case: Optional[Any] = ['''torch''', '''scipy''']
def __init__(self : Dict , *a : Optional[Any] , **a : int ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def _UpperCamelCase (cls : Union[str, Any] , *a : List[str] , **a : Any ) -> int:
"""simple docstring"""
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def _UpperCamelCase (cls : Any , *a : Tuple , **a : Optional[int] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['torch', 'scipy'] )
| 531 |
'''simple docstring'''
_lowercase = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_58_18,
}
def __UpperCamelCase ( from_type : str , to_type : str , value : float ) ->float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        snake_case = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(snake_case )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
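# Worked example: every conversion is routed through joules as the base unit, so
# value_to = value * factor[from_type] / factor[to_type]. One kilowatthour is
# 1 * 3_600_000 / 1.0 = 3.6e6 J, and 1 * 3_600_000 / 1_000 = 3_600 kJ:
def _demo_energy_conversion():
    assert __UpperCamelCase('kilowatthour' , 'joule' , 1 ) == 3_600_000.0
    assert __UpperCamelCase('kilowatthour' , 'kilojoule' , 1 ) == 3_600.0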
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | 0 |
from scipy.stats import pearsonr
import datasets
A__: Dict = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
A__: Union[str, Any] = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
A__: List[Any] = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _a ( datasets.Metric):
"""simple docstring"""
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def UpperCAmelCase_ ( self: int , __lowerCamelCase: Any , __lowerCamelCase: str , __lowerCamelCase: Optional[Any]=False ):
'''simple docstring'''
if return_pvalue:
UpperCamelCase__: Union[str, Any] = pearsonr(__lowerCamelCase , __lowerCamelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__lowerCamelCase , __lowerCamelCase )[0] )}
| 706 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _a ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Tuple = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: List[str] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
UpperCamelCase__: Tuple = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: List[str] = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCamelCase__: int = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: List[Any] = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCamelCase__: Optional[int] = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: List[Any] = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
UpperCamelCase__: Tuple = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Tuple = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase__: Union[str, Any] = "fp16"
self.assertFalse(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: str = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
UpperCamelCase__: List[Any] = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
UpperCamelCase__: Dict = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
UpperCamelCase__: Dict = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Any = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCamelCase__: Optional[int] = "fp16"
self.assertFalse(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
| 221 | 0 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__magic_name__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Model type selected in the list: """ + """, """.join(__SCREAMING_SNAKE_CASE )} )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
a_ = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a_ = field(
default=128 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
a_ = field(
default=64 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
a_ = field(
default=30 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
a_ = field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a_ = field(
default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a_ = field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
a_ = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """train"""
a_ = """dev"""
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = 42
a_ = 42
a_ = 42
a_ = 42
def __init__( self : Tuple ,_a : List[str] ,_a : List[Any] ,_a : Dict = None ,_a : Any = Split.train ,_a : Optional[int] = False ,_a : Dict = None ,_a : Any = "pt" ,):
'''simple docstring'''
A_ : Union[str, Any] = args
A_ : Dict = is_language_sensitive
        A_ : Tuple = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ):
try:
A_ : Any = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
A_ : int = mode
# Load data features from cache or dataset file
A_ : str = """v2""" if args.version_2_with_negative else """v1"""
A_ : Any = os.path.join(
cache_dir if cache_dir is not None else args.data_dir ,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' ,)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : List[Any] = cached_features_file + """.lock"""
with FileLock(UpperCAmelCase__ ):
if os.path.exists(UpperCAmelCase__ ) and not args.overwrite_cache:
A_ : Optional[Any] = time.time()
A_ : List[Any] = torch.load(UpperCAmelCase__ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
A_ : List[Any] = self.old_features["""features"""]
A_ : Tuple = self.old_features.get("""dataset""" ,UpperCAmelCase__ )
A_ : Optional[int] = self.old_features.get("""examples""" ,UpperCAmelCase__ )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' ,time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
A_ : Optional[Any] = self.processor.get_dev_examples(args.data_dir )
else:
A_ : Union[str, Any] = self.processor.get_train_examples(args.data_dir )
A_ , A_ : Any = squad_convert_examples_to_features(
examples=self.examples ,tokenizer=UpperCAmelCase__ ,max_seq_length=args.max_seq_length ,doc_stride=args.doc_stride ,max_query_length=args.max_query_length ,is_training=mode == Split.train ,threads=args.threads ,return_dataset=UpperCAmelCase__ ,)
A_ : Optional[int] = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} ,UpperCAmelCase__ ,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Dict ,_a : List[Any] ):
'''simple docstring'''
A_ : str = self.features[i]
A_ : Any = torch.tensor(feature.input_ids ,dtype=torch.long )
A_ : int = torch.tensor(feature.attention_mask ,dtype=torch.long )
A_ : Dict = torch.tensor(feature.token_type_ids ,dtype=torch.long )
A_ : Optional[Any] = torch.tensor(feature.cls_index ,dtype=torch.long )
A_ : List[str] = torch.tensor(feature.p_mask ,dtype=torch.float )
A_ : List[str] = torch.tensor(feature.is_impossible ,dtype=torch.float )
A_ : Optional[int] = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape ,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
A_ : List[Any] = torch.tensor(feature.start_position ,dtype=torch.long )
A_ : List[Any] = torch.tensor(feature.end_position ,dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
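# The FileLock dance in __init__ above is the standard "first process builds the cache, the
# others wait and then read it" pattern for distributed training. A stripped-down sketch of just
# that pattern (the path argument and placeholder payload are illustrative):
def _demo_cached_build(cache_path: str):
    with FileLock(cache_path + '.lock' ):  # only one process is inside at a time
        if os.path.exists(cache_path ):
            return torch.load(cache_path )  # later arrivals hit the warm cache
        features = {'features': list(range(10 ) )}  # stand-in for the expensive conversion
        torch.save(features , cache_path )
        return features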
| 665 |
'''simple docstring'''
def a ( number : int , iterations : int ) -> str:
    """simple docstring"""
    if not isinstance(iterations , int ):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            'starting number must be an integer and be more than 0' )
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
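# Worked example, computed by hand from the rules above (note the trailing space after each item):
#     a(1, 15) == "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "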
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def UpperCamelCase_ ( snake_case_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
__lowerCAmelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def UpperCamelCase_ ( snake_case_ : Dict ) -> Optional[int]:
'''simple docstring'''
__lowerCAmelCase = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
__lowerCAmelCase = s_dict.pop(__UpperCamelCase )
elif "subsample" in key:
__lowerCAmelCase = s_dict.pop(__UpperCamelCase )
def UpperCamelCase_ ( snake_case_ : Any ) -> Tuple:
'''simple docstring'''
__lowerCAmelCase = emb.weight.shape
__lowerCAmelCase = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
__lowerCAmelCase = emb.weight.data
return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
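# Hedged invocation sketch for the converter above; the script name and paths
# are hypothetical placeholders, not files shipped with this snippet:
#
#     python convert_s2t_fairseq_to_tfms.py \
#         --fairseq_path /path/to/fairseq_s2t_checkpoint.pt \
#         --pytorch_dump_folder_path /path/to/output_dir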
| 721 | '''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    '''simple docstring'''
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    '''simple docstring'''

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
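# A minimal usage sketch for AutoFeatureExtractor; the checkpoint id is just a
# well-known example, and any hub repo with a feature extractor config works:
#
#     from transformers import AutoFeatureExtractor
#     extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     # resolves to a Wav2Vec2FeatureExtractor via the mapping defined above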
| 330 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self ):
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
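# Non-interactive usage sketch for the treap above (values are arbitrary):
#
#     root = None
#     for v in [5, 3, 8, 3]:
#         root = insert(root, v)
#     root = erase(root, 3)   # removes every node holding 3
#     inorder(root)           # prints "5,8," (an in-order walk is sorted)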
| 361 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False
    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True)
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
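# A hedged usage sketch for the mock manager above; `dataset_name`, `config`
# and `version` are illustrative values, not a real dataset shipped here:
#
#     dl_manager = MockDownloadManager(
#         dataset_name="my_dataset", config=None, version="1.0.0",
#         use_local_dummy_data=True,
#     )
#     # A loading script would then call, e.g.:
#     # extracted = dl_manager.download_and_extract("https://example.com/data.zip")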
| 361 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae,
        text_encoder,
        clip_model,
        tokenizer,
        unet,
        scheduler,
        feature_extractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image,
        content_image,
        style_prompt=None,
        content_prompt=None,
        height=512,
        width=512,
        noise_strength=0.6,
        num_inference_steps=50,
        guidance_scale=7.5,
        batch_size=1,
        eta=0.0,
        clip_guidance_scale=100,
        generator=None,
        output_type="pil",
        return_dict=True,
        slerp_latent_style_strength=0.8,
        slerp_prompt_style_strength=0.1,
        slerp_clip_image_style_strength=0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt")
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt")
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)

        style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
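# Hedged usage sketch for the pipeline above. The checkpoint, CLIP model id
# and community-pipeline name are assumptions for illustration, and the CoCa
# captioning models are omitted (so both prompts must be passed explicitly):
#
#     from transformers import CLIPFeatureExtractor, CLIPModel
#     clip_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"  # illustrative choice
#     feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_id)
#     clip_model = CLIPModel.from_pretrained(clip_id)
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4",
#         custom_pipeline="clip_guided_images_mixing_stable_diffusion",  # assumed name
#         clip_model=clip_model,
#         feature_extractor=feature_extractor,
#     )
#     # out = pipe(style_image, content_image,
#     #            content_prompt="a photo", style_prompt="an oil painting")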
| 107 | from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
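# Behavior sketch: with torch or scipy missing, instantiating the dummy class
# above fails fast with an ImportError that names the backends to install:
#
#     LMSDiscreteScheduler()  # ImportError: ... requires the torch and scipy libraries ...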
| 107 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
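# Usage sketch: instantiating the config with its defaults matches the hopper
# setup referenced in the archive map above (values restated, not new facts):
#
#     config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
#     # config.n_layer == 3, config.n_head == 1, config.max_ep_len == 4096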
| 214 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
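# Usage sketch tying the two classes together (axis names are symbolic, not
# concrete tensor sizes):
#
#     config = ViTConfig(image_size=224, patch_size=16)
#     onnx_config = ViTOnnxConfig(config)
#     # onnx_config.inputs ->
#     #   {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}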
| 214 | 1 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """simple docstring"""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
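# Worked example for the converter above: 2 litres expressed in gallons.
# 2 litre -> 2 * 0.001 m^3 = 0.002 m^3, and 0.002 * 264.172 ~= 0.528 gallons:
#
#     print(volume_conversion(2, "litre", "gallon"))  # ~0.528344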
| 158 |
"""simple docstring"""
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
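# Usage sketch with a small hand-checkable grid (0 = open cell, 1 = wall);
# the path runs down the first column, across the middle row, then to the goal:
#
#     maze = [
#         [0, 1, 0],
#         [0, 0, 0],
#         [1, 1, 0],
#     ]
#     solve_maze(maze)  # prints the 0/1 solution matrix and returns True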
| 158 | 1 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'''{solution() = }''')
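# Background sketch (Project Euler 135): writing the progression terms as
# x = a + d, y = a, z = a - d gives x^2 - y^2 - z^2 = a * (4d - a) = n, so the
# loops above enumerate each divisor a ("first_term") of n and recover d from
# a + n / a = 4d, counting the n that have exactly ten such solutions.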
| 517 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
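# Hedged invocation sketch (the script filename is assumed; flag values mirror
# the argparse defaults defined above):
#
#     python extract_distilbert.py \
#         --model_type bert \
#         --model_name bert-base-uncased \
#         --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth \
#         --vocab_transform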
| 517 | 1 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowercase__ = """facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowercase__ = """allenai"""
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
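# Example invocation (illustrative, not part of the original script; the script
# and checkpoint names below are placeholders -- the dump dir is assumed to
# contain model.pt, dict.<lang>.txt files and bpecodes, per the comments above):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path ./wmt19.ru-en/model.pt \
#       --pytorch_dump_folder_path ./converted/wmt19-ru-en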
"""simple docstring"""
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
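
# A minimal numpy-only sketch of the same equalization mapping, added for
# illustration (not part of the original file): each gray level r_k is sent to
# s_k = (L - 1) * sum_j p(r_j), the scaled cumulative histogram.
def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=levels)  # per-level pixel counts
    cdf = np.cumsum(hist) / img.size  # cumulative distribution (running sk)
    mapping = np.rint((levels - 1) * cdf).astype(img.dtype)  # per-level lookup table
    return mapping[img]  # apply the lookup table pixel-wise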
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
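
# Illustrative usage (not part of the original module):
#     from transformers import SwitchTransformersConfig
#     cfg = SwitchTransformersConfig()   # google/switch-base-8 style defaults
#     cfg.encoder_sparse_step            # -> 4: every 4th of the 12 encoder layers is sparse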
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
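
# Example invocation (illustrative, not part of the original script; the
# script and file names below are placeholders):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path v1-5-pruned-emaonly.ckpt \
#       --original_config_file v1-inference.yaml \
#       --extract_ema \
#       --dump_path ./stable-diffusion-v1-5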
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # weight-0 edges go to the front of the deque, weight-1 to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
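
# Illustrative usage (not part of the original module): the weight-0 detour
# 0 -> 1 -> 2 -> 3 beats the direct weight-1 edge 0 -> 3.
if __name__ == "__main__":
    demo = AdjacencyList(4)
    demo.add_edge(0, 1, 0)
    demo.add_edge(1, 2, 0)
    demo.add_edge(2, 3, 0)
    demo.add_edge(0, 3, 1)
    assert demo.get_shortest_path(0, 3) == 0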
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count numbers below max_number that are the product of exactly two primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
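
# Worked example (illustrative, not part of the original solution): below 30
# the two-prime products are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26, so
# solution(30) == 10.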
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con,
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
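
# Illustrative usage (not part of the module); in user code the same path is
# normally reached through the public API, which instantiates SqlDatasetWriter
# with these arguments and calls write():
#     from datasets import Dataset
#     Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]}).to_sql("my_table", "sqlite:///data.db")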
'''simple docstring'''
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
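
# Illustrative usage (not part of the original module):
#     tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tok("sam is a great name. it means 'listener'").input_ids
#     tok.decode(ids)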
"""simple docstring"""
import json
import logging
import os
import socket

import git
import numpy as np
import torch


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info to <folder_path>/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setup."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
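
# Illustrative usage (not part of the original module): snapshot the current
# commit next to a run's outputs for reproducibility.
#     git_log("serialization_dir/")   # writes serialization_dir/git_log.json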
"""simple docstring"""
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
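
# Illustrative checks (not part of the original file); note the validator
# caps octets at 254 rather than 255:
#     is_ip_v4_address_valid("192.168.0.23")  -> True
#     is_ip_v4_address_valid("192.256.15.8")  -> False  (256 > 254)
#     is_ip_v4_address_valid("1.2.3")         -> False  (only three octets)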
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (numbers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
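
# Worked example (illustrative): the first ten ugly numbers are
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so ugly_numbers(10) == 12.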
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
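
# Illustrative usage (not part of the original module):
#     from transformers import Mask2FormerConfig
#     config = Mask2FormerConfig()   # defaults to a Swin backbone, 10 decoder layers
#     config.hidden_size             # -> 256, aliased to hidden_dim via attribute_map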
"""simple docstring"""
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
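
# Illustrative usage (not part of the original module): in a multi-process
# accelerate run, only local rank 0 draws the bar by default.
#     for batch in tqdm(True, dataloader):   # main_process_only=True
#         ...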
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
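
# Example invocation via fire (illustrative, not part of the original script):
#
#   python make_student.py facebook/bart-large-cnn ./student_12_3 --e 12 --d 3
#
# fire maps the positional and flag arguments onto
# create_student_by_copying_alternating_layers(teacher, save_path, e=..., d=...).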
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case_ = random.Random()
if is_torch_available():
import torch
def lowerCamelCase__ ( snake_case_ : Tuple , snake_case_ : Tuple=1.0 , snake_case_ : Dict=None , snake_case_ : int=None ) -> str:
if rng is None:
__snake_case = global_rng
__snake_case = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__(self : Dict , a__ : Tuple , a__ : Any=7 , a__ : str=400 , a__ : Optional[Any]=2000 , a__ : List[str]=1 , a__ : List[Any]=0.0 , a__ : Optional[Any]=1_6000 , a__ : Optional[Any]=True , a__ : int=True , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = min_seq_length
__snake_case = max_seq_length
__snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case = feature_size
__snake_case = padding_value
__snake_case = sampling_rate
__snake_case = return_attention_mask
__snake_case = do_normalize
def a (self : Any ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a (self : Any , a__ : Union[str, Any]=False , a__ : List[str]=False ):
"""simple docstring"""
def _flatten(a__ : int ):
return list(itertools.chain(*A__ ) )
if equal_length:
__snake_case = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__snake_case = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : int = ASTFeatureExtractor
def a (self : int ):
"""simple docstring"""
__snake_case = ASTFeatureExtractionTester(self )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__snake_case = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test not batched input
__snake_case = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
__snake_case = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(A__ , A__ , atol=1E-3 ) )
# Test batched
__snake_case = feat_extract(A__ , padding=A__ , return_tensors='''np''' ).input_values
__snake_case = feat_extract(A__ , padding=A__ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__snake_case = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__snake_case = np.asarray(A__ )
__snake_case = feat_extract(A__ , return_tensors='''np''' ).input_values
__snake_case = feat_extract(A__ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1E-3 ) )
@require_torch
def a (self : List[str] ):
"""simple docstring"""
import torch
__snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__snake_case = np.random.rand(100 ).astype(np.floataa )
__snake_case = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__snake_case = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__snake_case = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def a (self : Optional[Any] , a__ : Any ):
"""simple docstring"""
from datasets import load_dataset
__snake_case = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__snake_case = ds.sort('''id''' ).select(range(A__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def a (self : Optional[Any] ):
"""simple docstring"""
        # fmt: off
        __snake_case = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
__snake_case = self._load_datasamples(1 )
__snake_case = ASTFeatureExtractor()
__snake_case = feature_extractor(A__ , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1E-4 ) )
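# A hedged stand-alone sketch of the integration check above: with default
# settings the extractor maps any mono waveform to a (1, 1024, 128) log-mel
# tensor (1024 frames x 128 mel bins), padding or truncating in time:
#
#   import numpy as np
#   from transformers import ASTFeatureExtractor
#
#   extractor = ASTFeatureExtractor()
#   waveform = np.random.randn(16000).astype(np.float32)  # ~1 s of fake audio
#   features = extractor(waveform, sampling_rate=16000, return_tensors="pt")
#   assert features.input_values.shape == (1, 1024, 128)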
| 720 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def a (*a__ : Optional[Any] , **a__ : Optional[Any] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A_ : Optional[Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def a (self : str , a__ : List[Any] , a__ : Optional[Any] , a__ : List[str] ):
"""simple docstring"""
__snake_case = ObjectDetectionPipeline(model=a__ , image_processor=a__ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def a (self : List[str] , a__ : Optional[Any] , a__ : Union[str, Any] ):
"""simple docstring"""
__snake_case = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(a__ ) , 0 )
for detected_object in outputs:
self.assertEqual(
a__ , {
'''score''': ANY(a__ ),
'''label''': ANY(a__ ),
'''box''': {'''xmin''': ANY(a__ ), '''ymin''': ANY(a__ ), '''xmax''': ANY(a__ ), '''ymax''': ANY(a__ )},
} , )
import datasets
__snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
__snake_case = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
__snake_case = object_detector(a__ , threshold=0.0 )
self.assertEqual(len(a__ ) , len(a__ ) )
for outputs in batch_outputs:
self.assertGreater(len(a__ ) , 0 )
for detected_object in outputs:
self.assertEqual(
a__ , {
'''score''': ANY(a__ ),
'''label''': ANY(a__ ),
'''box''': {'''xmin''': ANY(a__ ), '''ymin''': ANY(a__ ), '''xmax''': ANY(a__ ), '''ymax''': ANY(a__ )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@require_torch
def a (self : Any ):
"""simple docstring"""
__snake_case = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
__snake_case = AutoModelForObjectDetection.from_pretrained(a__ )
__snake_case = AutoFeatureExtractor.from_pretrained(a__ )
__snake_case = ObjectDetectionPipeline(model=a__ , feature_extractor=a__ )
__snake_case = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
] , )
__snake_case = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
] , )
@require_torch
@slow
def a (self : int ):
"""simple docstring"""
__snake_case = '''facebook/detr-resnet-50'''
__snake_case = AutoModelForObjectDetection.from_pretrained(a__ )
__snake_case = AutoFeatureExtractor.from_pretrained(a__ )
__snake_case = ObjectDetectionPipeline(model=a__ , feature_extractor=a__ )
__snake_case = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
__snake_case = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = '''facebook/detr-resnet-50'''
__snake_case = pipeline('''object-detection''' , model=a__ )
__snake_case = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
__snake_case = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
def a (self : str ):
"""simple docstring"""
__snake_case = 0.9_9_8_5
__snake_case = '''facebook/detr-resnet-50'''
__snake_case = pipeline('''object-detection''' , model=a__ )
__snake_case = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=a__ )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def a (self : Dict ):
"""simple docstring"""
__snake_case = '''Narsil/layoutlmv3-finetuned-funsd'''
__snake_case = 0.9_9_9_3
__snake_case = pipeline('''object-detection''' , model=a__ , threshold=a__ )
__snake_case = object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
] , )
| 388 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = value
_lowerCAmelCase : Node | None = None
_lowerCAmelCase : Node | None = None
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tree
def a ( self , snake_case__ ):
'''simple docstring'''
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ):
'''simple docstring'''
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
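# A hedged usage sketch. In the original source the two classes above are a
# plain ``Node`` and a ``BinaryTreeNodeSum`` iterator; with those names,
# summing a root of 10 with children 5 and -3 looks like:
#
#   tree = Node(10)
#   tree.left = Node(5)
#   tree.right = Node(-3)
#   assert next(iter(BinaryTreeNodeSum(tree))) == 12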
| 444 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho (num , seed = 2 , step = 1 , attempts = 3 , ):
"""simple docstring"""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
    # to calculate; it only needs to be random in the sense that all output
# values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``.
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value , step , modulus ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
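# A quick sanity sketch (hedged: which nontrivial factor is returned first
# depends on the seed/step defaults above). Since 8051 = 83 * 97:
#
#   >>> pollard_rho(8051) in (83, 97)
#   True
#   >>> pollard_rho(17) is None  # 17 is prime, so every attempt fails
#   True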
if __name__ == "__main__":
import argparse
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
lowerCAmelCase : List[str] = parser.parse_args()
lowerCAmelCase : List[str] = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
lowerCAmelCase : Union[str, Any] = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 444 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase_ : str = logging.getLogger(__name__)
class _lowercase ( lowerCAmelCase ):
_a : Optional[int] = '''token-classification'''
def __init__( self : Union[str, Any] , a : Optional[int] ):
"""simple docstring"""
if type(a ) == dict:
__snake_case : Optional[int] =Namespace(**a )
__snake_case : Optional[int] =import_module('''tasks''' )
try:
__snake_case : Optional[int] =getattr(a , hparams.task_type )
__snake_case : TokenClassificationTask =token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
__snake_case : str =self.token_classification_task.get_labels(hparams.labels )
__snake_case : Tuple =CrossEntropyLoss().ignore_index
super().__init__(a , len(self.labels ) , self.mode )
def _UpperCamelCase ( self : Optional[Any] , **a : Tuple ):
"""simple docstring"""
return self.model(**a )
def _UpperCamelCase ( self : Any , a : str , a : Tuple ):
"""simple docstring"""
__snake_case : Dict ={'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
__snake_case : Optional[Any] =(
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
__snake_case : Union[str, Any] =self(**a )
__snake_case : int =outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _UpperCamelCase ( self : str ):
"""simple docstring"""
__snake_case : Optional[int] =self.hparams
for mode in ["train", "dev", "test"]:
__snake_case : List[Any] =self._feature_file(a )
if os.path.exists(a ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , a )
__snake_case : Optional[Any] =torch.load(a )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
__snake_case : List[str] =self.token_classification_task.read_examples_from_file(args.data_dir , a )
__snake_case : List[str] =self.token_classification_task.convert_examples_to_features(
                    a ,
                    self.labels ,
                    args.max_seq_length ,
                    self.tokenizer ,
                    cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) ,
                    cls_token=self.tokenizer.cls_token ,
                    cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 ,
                    sep_token=self.tokenizer.sep_token ,
                    sep_token_extra=a ,
                    pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) ,
                    pad_token=self.tokenizer.pad_token_id ,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id ,
                    pad_token_label_id=self.pad_token_label_id ,
                )
logger.info('''Saving features into cached file %s''' , a )
torch.save(a , a )
def _UpperCamelCase ( self : Optional[int] , a : int , a : int , a : bool = False ):
"""simple docstring"""
__snake_case : Any =self._feature_file(a )
logger.info('''Loading features from cached file %s''' , a )
__snake_case : int =torch.load(a )
__snake_case : Optional[int] =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case : Any =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
__snake_case : Optional[Any] =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
__snake_case : int =torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK: we will stop relying on this soon.
__snake_case : Any =torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(a , a , a , a ) , batch_size=a )
def _UpperCamelCase ( self : Optional[int] , a : int , a : int ):
"""simple docstring"""
"""Compute validation""" ""
__snake_case : Union[str, Any] ={'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
__snake_case : Union[str, Any] =(
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
__snake_case : Optional[Any] =self(**a )
__snake_case , __snake_case : Optional[int] =outputs[:2]
__snake_case : Optional[Any] =logits.detach().cpu().numpy()
__snake_case : Any =inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _UpperCamelCase ( self : Union[str, Any] , a : Optional[int] ):
"""simple docstring"""
__snake_case : List[Any] =torch.stack([x['''val_loss'''] for x in outputs] ).mean()
__snake_case : Optional[int] =np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
__snake_case : Union[str, Any] =np.argmax(a , axis=2 )
__snake_case : str =np.concatenate([x['''target'''] for x in outputs] , axis=0 )
__snake_case : Union[str, Any] =dict(enumerate(self.labels ) )
__snake_case : List[Any] =[[] for _ in range(out_label_ids.shape[0] )]
__snake_case : Any =[[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
__snake_case : int ={
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(a , a ),
'''precision''': precision_score(a , a ),
'''recall''': recall_score(a , a ),
'''f1''': fa_score(a , a ),
}
__snake_case : Any =dict(results.items() )
__snake_case : int =results
return ret, preds_list, out_label_list
def _UpperCamelCase ( self : List[str] , a : List[Any] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case : str =self._eval_end(a )
__snake_case : Optional[Any] =ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _UpperCamelCase ( self : List[Any] , a : Any ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case : Tuple =self._eval_end(a )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
__snake_case : Tuple =ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _UpperCamelCase ( a : int , a : Optional[int] ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(a , a )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=a , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=1_2_8 , type=a , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=a , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=a , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
UpperCamelCase_ : List[Any] = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase_ : Optional[Any] = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase_ : Optional[Any] = parser.parse_args()
UpperCamelCase_ : List[Any] = NERTransformer(args)
UpperCamelCase_ : str = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase_ : str = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
UpperCamelCase_ : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
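# A hedged invocation sketch. --task_type, --labels, --max_seq_length and
# --gpus are defined above; the remaining flags are assumed to come from the
# shared lightning_base argument parser and may differ between versions:
#
#   python run_ner_pl.py \
#       --data_dir ./germeval \
#       --labels ./germeval/labels.txt \
#       --model_name_or_path bert-base-multilingual-cased \
#       --output_dir ./germeval-model \
#       --task_type NER \
#       --max_seq_length 128 \
#       --do_train --do_predict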
| 497 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( lowerCAmelCase , unittest.TestCase ):
_a : List[str] = MgpstrTokenizer
_a : int = False
_a : List[str] = {}
_a : Optional[Any] = False
def _UpperCamelCase ( self : str ):
"""simple docstring"""
super().setUp()
# fmt: off
__snake_case : Union[str, Any] =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__snake_case : Optional[Any] =dict(zip(a , range(len(a ) ) ) )
__snake_case : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a ) + '''\n''' )
def _UpperCamelCase ( self : Any , **a : int ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **a )
def _UpperCamelCase ( self : int , a : Any ):
"""simple docstring"""
__snake_case : Dict ='''tester'''
__snake_case : str ='''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
pass
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
__snake_case : List[Any] =self.get_tokenizers(do_lower_case=a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : List[Any] ='''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__snake_case : Tuple =tokenizer.encode([special_token] , add_special_tokens=a )
self.assertEqual(len(a ) , 1 )
__snake_case : int =tokenizer.decode(a , skip_special_tokens=a )
self.assertTrue(special_token not in decoded )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : Union[str, Any] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case , __snake_case : Tuple =self.get_input_output_texts(a )
__snake_case : int =tokenizer.tokenize(a )
__snake_case : str =tokenizer.convert_tokens_to_ids(a )
__snake_case : Union[str, Any] =tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__snake_case : Tuple =tokenizer.convert_ids_to_tokens(a )
self.assertNotEqual(len(a ) , 0 )
__snake_case : Optional[Any] =tokenizer.decode(a )
self.assertIsInstance(a , a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass
| 497 | 1 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Any=3 ,_UpperCamelCase : Union[str, Any]=3_2 ,_UpperCamelCase : List[Any]=3 ,_UpperCamelCase : Dict=1_0 ,_UpperCamelCase : Dict=[1_0, 2_0, 3_0, 4_0] ,_UpperCamelCase : List[Any]=[1, 1, 2, 1] ,_UpperCamelCase : Dict=True ,_UpperCamelCase : int=True ,_UpperCamelCase : str="relu" ,_UpperCamelCase : Union[str, Any]=3 ,_UpperCamelCase : Any=None ,) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =parent
SCREAMING_SNAKE_CASE__ =batch_size
SCREAMING_SNAKE_CASE__ =image_size
SCREAMING_SNAKE_CASE__ =num_channels
SCREAMING_SNAKE_CASE__ =embeddings_size
SCREAMING_SNAKE_CASE__ =hidden_sizes
SCREAMING_SNAKE_CASE__ =depths
SCREAMING_SNAKE_CASE__ =is_training
SCREAMING_SNAKE_CASE__ =use_labels
SCREAMING_SNAKE_CASE__ =hidden_act
SCREAMING_SNAKE_CASE__ =num_labels
SCREAMING_SNAKE_CASE__ =scope
SCREAMING_SNAKE_CASE__ =len(a__ )
def __A ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ =self.get_config()
return config, pixel_values
def __A ( self : Dict ) -> List[Any]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __A ( self : str ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =FlaxRegNetModel(config=a__ )
SCREAMING_SNAKE_CASE__ =model(a__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) ,)
def __A ( self : Tuple ,_UpperCamelCase : Dict ,_UpperCamelCase : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.num_labels
SCREAMING_SNAKE_CASE__ =FlaxRegNetForImageClassification(config=a__ )
SCREAMING_SNAKE_CASE__ =model(a__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __A ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ =config_and_inputs
SCREAMING_SNAKE_CASE__ ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
_A : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_A : Any = False
_A : List[str] = False
_A : Optional[Any] = False
def __A ( self : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =FlaxRegNetModelTester(self )
SCREAMING_SNAKE_CASE__ =ConfigTester(self ,config_class=a__ ,has_text_modality=a__ )
def __A ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
return
def __A ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def __A ( self : Tuple ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def __A ( self : int ) -> Tuple:
'''simple docstring'''
pass
def __A ( self : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ =model_class(a__ )
SCREAMING_SNAKE_CASE__ =inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ =["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,a__ )
def __A ( self : int ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(_UpperCamelCase : List[Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : List[str] ):
SCREAMING_SNAKE_CASE__ =model_class(a__ )
SCREAMING_SNAKE_CASE__ =model(**self._prepare_for_class(a__ ,a__ ) )
SCREAMING_SNAKE_CASE__ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE__ =self.model_tester.num_stages
self.assertEqual(len(a__ ) ,expected_num_stages + 1 )
SCREAMING_SNAKE_CASE__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ =True
check_hidden_states_output(a__ ,a__ ,a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ =True
check_hidden_states_output(a__ ,a__ ,a__ )
def __A ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE__ =self._prepare_for_class(a__ ,a__ )
SCREAMING_SNAKE_CASE__ =model_class(a__ )
@jax.jit
def model_jitted(_UpperCamelCase : List[str] ,**_UpperCamelCase : Dict ):
return model(pixel_values=a__ ,**a__ )
with self.subTest("""JIT Enabled""" ):
SCREAMING_SNAKE_CASE__ =model_jitted(**a__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE__ =model_jitted(**a__ ).to_tuple()
self.assertEqual(len(a__ ) ,len(a__ ) )
for jitted_output, output in zip(a__ ,a__ ):
self.assertEqual(jitted_output.shape ,output.shape )
def UpperCAmelCase_ ( ):
SCREAMING_SNAKE_CASE__ =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class __a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : Dict ) -> Tuple:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def __A ( self : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
SCREAMING_SNAKE_CASE__ =self.default_image_processor
SCREAMING_SNAKE_CASE__ =prepare_img()
SCREAMING_SNAKE_CASE__ =image_processor(images=a__ ,return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ =model(**a__ )
# verify the logits
SCREAMING_SNAKE_CASE__ =(1, 1_0_0_0)
self.assertEqual(outputs.logits.shape ,a__ )
SCREAMING_SNAKE_CASE__ =jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,a__ ,atol=1e-4 ) )
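# The JIT test above leans on two JAX idioms; a minimal, self-contained sketch
# with no transformers dependency (function name is illustrative):
#
#   import jax
#   import jax.numpy as jnp
#
#   @jax.jit
#   def scaled_sum(x):
#       return (2.0 * x).sum()  # traced once, compiled, then reused
#
#   x = jnp.ones((4, 4))
#   fast = scaled_sum(x)
#   with jax.disable_jit():          # run the same code eagerly, op by op
#       slow = scaled_sum(x)
#   assert jnp.allclose(fast, slow)  # both paths must agree, as in the test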
| 151 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Dict = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = "unispeech-sat"
def __init__( self , a__=32 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.1 , a__=0.1 , a__=0.0_2 , a__=1e-5 , a__="group" , a__="gelu" , a__=(512, 512, 512, 512, 512, 512, 512) , a__=(5, 2, 2, 2, 2, 2, 2) , a__=(10, 3, 3, 3, 3, 2, 2) , a__=False , a__=128 , a__=16 , a__=False , a__=True , a__=0.0_5 , a__=10 , a__=2 , a__=0.0 , a__=10 , a__=0 , a__=320 , a__=2 , a__=0.1 , a__=100 , a__=256 , a__=256 , a__=0.1 , a__="mean" , a__=False , a__=False , a__=256 , a__=(512, 512, 512, 512, 1500) , a__=(5, 3, 3, 1, 1) , a__=(1, 2, 3, 1, 1) , a__=512 , a__=0 , a__=1 , a__=2 , a__=504 , **a__ , ):
super().__init__(**a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ )
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : int = feat_extract_norm
_lowerCAmelCase : Any = feat_extract_activation
_lowerCAmelCase : List[Any] = list(a__ )
_lowerCAmelCase : List[str] = list(a__ )
_lowerCAmelCase : Dict = list(a__ )
_lowerCAmelCase : str = conv_bias
_lowerCAmelCase : Optional[Any] = num_conv_pos_embeddings
_lowerCAmelCase : Union[str, Any] = num_conv_pos_embedding_groups
_lowerCAmelCase : int = len(self.conv_dim )
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : str = hidden_dropout
_lowerCAmelCase : Any = attention_dropout
_lowerCAmelCase : Optional[Any] = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : List[str] = final_dropout
_lowerCAmelCase : Union[str, Any] = layerdrop
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : str = num_clusters
_lowerCAmelCase : Optional[Any] = do_stable_layer_norm
_lowerCAmelCase : Optional[Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Tuple = apply_spec_augment
_lowerCAmelCase : Optional[Any] = mask_time_prob
_lowerCAmelCase : List[Any] = mask_time_length
_lowerCAmelCase : List[Any] = mask_time_min_masks
_lowerCAmelCase : Optional[Any] = mask_feature_prob
_lowerCAmelCase : str = mask_feature_length
_lowerCAmelCase : Any = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase : int = num_codevectors_per_group
_lowerCAmelCase : Tuple = num_codevector_groups
_lowerCAmelCase : str = contrastive_logits_temperature
_lowerCAmelCase : Optional[int] = feat_quantizer_dropout
_lowerCAmelCase : Any = num_negatives
_lowerCAmelCase : Optional[int] = codevector_dim
_lowerCAmelCase : List[Any] = proj_codevector_dim
_lowerCAmelCase : Optional[int] = diversity_loss_weight
# ctc loss
_lowerCAmelCase : Union[str, Any] = ctc_loss_reduction
_lowerCAmelCase : List[str] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : int = list(a__ )
_lowerCAmelCase : List[Any] = list(a__ )
_lowerCAmelCase : Union[str, Any] = list(a__ )
_lowerCAmelCase : List[str] = xvector_output_dim
@property
def __A ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
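# The property above multiplies all convolutional strides together, i.e. the
# total temporal downsampling of the feature encoder. A hedged check with the
# default strides (5, 2, 2, 2, 2, 2, 2):
#
#   import functools, operator
#   strides = (5, 2, 2, 2, 2, 2, 2)
#   assert functools.reduce(operator.mul, strides, 1) == 320
#   # one output frame per 320 input samples, i.e. 20 ms at 16 kHz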
| 213 | 0 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
def UpperCAmelCase_ (__a : Any , __a : Any ):
"""simple docstring"""
try:
with open(__a , 'rb' ) as flax_state_f:
_a : Union[str, Any] = from_bytes(__a , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(__a ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(__a , __a )
def UpperCAmelCase_ (__a : Optional[int] , __a : Dict ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
_a : Tuple = flatten_dict(jax.tree_util.tree_map(lambda __a : x.dtype == jnp.bfloataa , __a ) ).values()
if any(__a ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
_a : Optional[int] = jax.tree_util.tree_map(
lambda __a : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __a )
_a : str = ''
_a : int = flatten_dict(__a , sep='.' )
_a : List[Any] = pt_model.state_dict()
# keep track of unexpected & missing keys
_a : Union[str, Any] = []
_a : int = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_a : Optional[int] = flax_key_tuple.split('.' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
_a : str = flax_key_tuple_array[:-1] + ['weight']
_a : Dict = jnp.transpose(__a , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
_a : Dict = flax_key_tuple_array[:-1] + ['weight']
_a : List[str] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
_a : Dict = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(__a ):
_a : Optional[Any] = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
_a : int = '.'.join(__a )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
_a : List[str] = np.asarray(__a ) if not isinstance(__a , np.ndarray ) else flax_tensor
_a : Union[str, Any] = torch.from_numpy(__a )
# remove from missing keys
missing_keys.remove(__a )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__a )
pt_model.load_state_dict(__a )
# re-transform missing_keys to list
_a : Optional[int] = list(__a )
if len(__a ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(__a ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
' use it for predictions and inference.' )
return pt_model
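# The 4-D "kernel" branch above converts Flax conv kernels, stored as
# (H, W, in_channels, out_channels), to PyTorch's (out_channels, in_channels,
# H, W) layout. A minimal check of that axis permutation:
#
#   import numpy as np
#   flax_kernel = np.zeros((3, 3, 16, 32))    # H, W, C_in, C_out
#   pt_weight = np.transpose(flax_kernel, (3, 2, 0, 1))
#   assert pt_weight.shape == (32, 16, 3, 3)  # C_out, C_in, H, W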
| 319 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
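# A hedged sketch (illustrative names, not the real transformers internals) of
# what the ``_LazyModule`` indirection above buys: attribute access, rather
# than module import, is what triggers loading the heavy submodules.
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # map each exported name to the submodule that defines it
#           self._origin = {
#               attr: mod
#               for mod, attrs in import_structure.items()
#               for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           submodule = importlib.import_module("." + self._origin[attr], self.__name__)
#           return getattr(submodule, attr)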
| 319 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __A( a ):
snake_case_ = '''Speech2TextFeatureExtractor'''
snake_case_ = '''Speech2TextTokenizer'''
def __init__( self , _snake_case , _snake_case ) -> Dict:
'''simple docstring'''
super().__init__(_snake_case , _snake_case )
__a = self.feature_extractor
__a = False
def __call__( self , *_snake_case , **_snake_case ) -> Union[str, Any]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_snake_case , **_snake_case )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
__a = kwargs.pop('''raw_speech''' )
else:
__a = kwargs.pop('''audio''' , _snake_case )
__a = kwargs.pop('''sampling_rate''' , _snake_case )
__a = kwargs.pop('''text''' , _snake_case )
if len(_snake_case ) > 0:
__a = args[0]
__a = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
__a = self.feature_extractor(_snake_case , *_snake_case , sampling_rate=_snake_case , **_snake_case )
if text is not None:
__a = self.tokenizer(_snake_case , **_snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__a = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*_snake_case , **_snake_case )
@contextmanager
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
__a = True
__a = self.tokenizer
yield
__a = self.feature_extractor
        __a = False
| 219 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
A : str = logging.get_logger()
@dataclass
class __A:
snake_case_ = 42
snake_case_ = field(default_factory=a )
snake_case_ = field(default_factory=a )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = len(list(m.modules() ) ) == 1 or isinstance(_snake_case , nn.Convad ) or isinstance(_snake_case , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_snake_case )
def __call__( self , _snake_case ) -> Any:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_snake_case )
[x.remove() for x in self.handles]
return self
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
return list(filter(lambda _snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __A:
snake_case_ = 42
snake_case_ = 42
snake_case_ = 0
snake_case_ = field(default_factory=a )
snake_case_ = field(default_factory=a )
def __call__( self , _snake_case ) -> Dict:
'''simple docstring'''
__a = Tracker(self.dest )(_snake_case ).parametrized
__a = Tracker(self.src )(_snake_case ).parametrized
__a = list(filter(lambda _snake_case : type(_snake_case ) not in self.src_skip , _snake_case ) )
__a = list(filter(lambda _snake_case : type(_snake_case ) not in self.dest_skip , _snake_case ) )
if len(_snake_case ) != len(_snake_case ):
raise Exception(
F"""Numbers of operations are different. Source module has {len(_snake_case )} operations while"""
F""" destination module has {len(_snake_case )}.""" )
for dest_m, src_m in zip(_snake_case , _snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transfered from={src_m} to={dest_m}""" )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ = True ) -> str:
print(F"""Converting {name}...""" )
with torch.no_grad():
__a = timm.create_model(a__ , pretrained=a__ ).eval()
__a = ResNetForImageClassification(a__ ).eval()
__a = ModuleTransfer(src=a__ , dest=a__ )
__a = torch.randn((1, 3, 224, 224) )
module_transfer(a__ )
assert torch.allclose(from_model(a__ ) , our_model(a__ ).logits ), "The model logits don't match the original one."
__a = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(a__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=a__ , )
        # We can reuse the ConvNeXT image processor here.
__a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=a__ , )
print(F"""Pushed {checkpoint_name}""" )
def __lowerCAmelCase ( a__ , a__ = None , a__ = True ) -> List[Any]:
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = (1, num_labels)
__a = '''huggingface/label-files'''
__a = num_labels
__a = json.load(open(hf_hub_download(a__ , a__ , repo_type='''dataset''' ) , '''r''' ) )
__a = {int(a__ ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = partial(a__ , num_labels=a__ , idalabel=a__ , labelaid=a__ )
__a = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(a__ , names_to_config[model_name] , a__ , a__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(a__ , a__ , a__ , a__ )
return config, expected_shape
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
        'The name of the model you wish to convert, it must be one of the supported resnet* architectures,'
        ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
A : List[Any] = parser.parse_args()
A : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 219 | 1 |
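# A minimal, illustrative sketch of the tracing idea the Tracker/ModuleTransfer
# classes above rely on: register forward hooks to record every parametrized
# leaf module in execution order, then copy state dicts pairwise. This is an
# assumption-level reconstruction, not the exact implementation above.
import torch
from torch import nn

def traced_leaf_modules(model: nn.Module, x: torch.Tensor) -> list[nn.Module]:
    """Return the parametrized leaf modules of `model` in execution order on `x`."""
    traced: list[nn.Module] = []
    hooks = [
        m.register_forward_hook(lambda mod, inp, out: traced.append(mod))
        for m in model.modules()
        if len(list(m.children())) == 0  # leaf modules only
    ]
    with torch.no_grad():
        model(x)
    for h in hooks:
        h.remove()
    return [m for m in traced if len(m.state_dict()) > 0]  # keep weight-bearing ops

def transfer_weights(src: nn.Module, dest: nn.Module, x: torch.Tensor) -> None:
    src_ops = traced_leaf_modules(src, x)
    dest_ops = traced_leaf_modules(dest, x)
    if len(src_ops) != len(dest_ops):
        raise RuntimeError(f"operation count mismatch: {len(src_ops)} vs {len(dest_ops)}")
    for d, s in zip(dest_ops, src_ops):
        d.load_state_dict(s.state_dict())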
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase (metaclass=_UpperCAmelCase ):
__A = ['''torch''', '''torchsde''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch""", """torchsde"""] )
| 713 |
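# The dummy classes above stand in for objects whose backends are missing. A
# hedged sketch of what a `requires_backends`-style check boils down to — an
# import-availability test that raises a readable error. The helper name and
# message format below are assumptions, not the actual library implementation.
import importlib.util

def requires_backends_sketch(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = getattr(obj, "__name__", type(obj).__name__)
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")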
'''simple docstring'''
import requests
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = {"""Content-Type""": """application/json"""}
lowercase = requests.post(lowercase_ , json={"""text""": message_body} , headers=lowercase_ )
if response.status_code != 200:
lowercase = (
"""Request to slack returned an error """
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowercase_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653 | 0 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None) -> Tuple:
a__ =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , lowercase_ , getattr(lowercase_ , lowercase_))
a__ =module._original_module if isinstance(lowercase_ , _PatchedModuleObj) else module
class lowercase_ :
snake_case =[]
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[str]:
a__ =obj
a__ =target
a__ =new
a__ =target.split('.')[0]
a__ ={}
a__ =attrs or []
def __enter__( self) -> Optional[int]:
*a__ , a__ =self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase_)):
try:
a__ =import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a__ =getattr(self.obj , lowercase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a__ =obj_attr
# patch at top level
setattr(self.obj , lowercase_ , _PatchedModuleObj(lowercase_ , attrs=self.attrs))
a__ =getattr(self.obj , lowercase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase_ , lowercase_ , _PatchedModuleObj(getattr(lowercase_ , lowercase_ , lowercase_) , attrs=self.attrs))
a__ =getattr(lowercase_ , lowercase_)
# finally set the target attribute
setattr(lowercase_ , lowercase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a__ =getattr(import_module('.'.join(lowercase_)) , lowercase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase_) is attr_value:
a__ =getattr(self.obj , lowercase_)
setattr(self.obj , lowercase_ , self.new)
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a__ =globals()['__builtins__'][target_attr]
setattr(self.obj , lowercase_ , self.new)
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self , *lowercase_) -> str:
for attr in list(self.original):
setattr(self.obj , lowercase_ , self.original.pop(lowercase_))
def __UpperCamelCase ( self) -> Any:
self.__enter__()
self._active_patches.append(self)
def __UpperCamelCase ( self) -> Union[str, Any]:
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
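# The patcher above generalizes attribute patching across renamed imports
# ("from os import path as ospath" and the like). For a single well-known
# attribute, the same basic idea can be sketched with the standard library;
# this stand-in deliberately ignores the renamed-import cases handled above.
import os.path
from unittest import mock

def fake_join(*parts: str) -> str:
    return "/".join(parts)  # naive join, used only while the patch is active

with mock.patch("os.path.join", fake_join):
    assert os.path.join("a", "b") == "a/b"
assert os.path.join("a", "b") == os.sep.join(["a", "b"])  # restored afterwards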
| 20 | from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase: Any = logging.get_logger(__name__)
# General docstring
_lowercase: List[Any] = '''RegNetConfig'''
# Base docstring
_lowercase: List[Any] = '''facebook/regnet-y-040'''
_lowercase: int = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_lowercase: Union[str, Any] = '''facebook/regnet-y-040'''
_lowercase: Tuple = '''tabby, tabby cat'''
_lowercase: str = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : int , lowercase__ : int , lowercase__ : int = 3 , lowercase__ : int = 1 , lowercase__ : int = 1 , lowercase__ : Optional[str] = "relu" , ):
super().__init__()
_lowerCAmelCase = nn.Convad(
lowercase__ , lowercase__ , kernel_size=lowercase__ , stride=lowercase__ , padding=kernel_size // 2 , groups=lowercase__ , bias=lowercase__ , )
_lowerCAmelCase = nn.BatchNormad(lowercase__ )
_lowerCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self : str , lowercase__ : Union[str, Any] ):
_lowerCAmelCase = self.convolution(lowercase__ )
_lowerCAmelCase = self.normalization(lowercase__ )
_lowerCAmelCase = self.activation(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : int , lowercase__ : RegNetConfig ):
super().__init__()
_lowerCAmelCase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_lowerCAmelCase = config.num_channels
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : Optional[int] ):
_lowerCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_lowerCAmelCase = self.embedder(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Optional[Any] , lowercase__ : int , lowercase__ : int , lowercase__ : int = 2 ):
super().__init__()
_lowerCAmelCase = nn.Convad(lowercase__ , lowercase__ , kernel_size=1 , stride=lowercase__ , bias=lowercase__ )
_lowerCAmelCase = nn.BatchNormad(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : Tensor ):
_lowerCAmelCase = self.convolution(lowercase__ )
_lowerCAmelCase = self.normalization(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Optional[Any] , lowercase__ : int , lowercase__ : int ):
super().__init__()
_lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
_lowerCAmelCase = nn.Sequential(
nn.Convad(lowercase__ , lowercase__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase__ , lowercase__ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : List[Any] ):
# b c h w -> b c 1 1
_lowerCAmelCase = self.pooler(lowercase__ )
_lowerCAmelCase = self.attention(lowercase__ )
_lowerCAmelCase = hidden_state * attention
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : RegNetConfig , lowercase__ : int , lowercase__ : int , lowercase__ : int = 1 ):
super().__init__()
_lowerCAmelCase = in_channels != out_channels or stride != 1
_lowerCAmelCase = max(1 , out_channels // config.groups_width )
_lowerCAmelCase = (
RegNetShortCut(lowercase__ , lowercase__ , stride=lowercase__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase = nn.Sequential(
RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase__ , lowercase__ , stride=lowercase__ , groups=lowercase__ , activation=config.hidden_act ) , RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=lowercase__ ) , )
_lowerCAmelCase = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Dict , lowercase__ : Any ):
_lowerCAmelCase = hidden_state
_lowerCAmelCase = self.layer(lowercase__ )
_lowerCAmelCase = self.shortcut(lowercase__ )
hidden_state += residual
_lowerCAmelCase = self.activation(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Any , lowercase__ : RegNetConfig , lowercase__ : int , lowercase__ : int , lowercase__ : int = 1 ):
super().__init__()
_lowerCAmelCase = in_channels != out_channels or stride != 1
_lowerCAmelCase = max(1 , out_channels // config.groups_width )
_lowerCAmelCase = (
RegNetShortCut(lowercase__ , lowercase__ , stride=lowercase__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase = nn.Sequential(
RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase__ , lowercase__ , stride=lowercase__ , groups=lowercase__ , activation=config.hidden_act ) , RegNetSELayer(lowercase__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase__ , lowercase__ , kernel_size=1 , activation=lowercase__ ) , )
_lowerCAmelCase = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Tuple ):
_lowerCAmelCase = hidden_state
_lowerCAmelCase = self.layer(lowercase__ )
_lowerCAmelCase = self.shortcut(lowercase__ )
hidden_state += residual
_lowerCAmelCase = self.activation(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : RegNetConfig , lowercase__ : int , lowercase__ : int , lowercase__ : int = 2 , lowercase__ : int = 2 , ):
super().__init__()
_lowerCAmelCase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
_lowerCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase__ , lowercase__ , lowercase__ , stride=lowercase__ , ) , *[layer(lowercase__ , lowercase__ , lowercase__ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : Any ):
_lowerCAmelCase = self.layers(lowercase__ )
return hidden_state
class lowerCamelCase__ ( nn.Module ):
def __init__( self : Dict , lowercase__ : RegNetConfig ):
super().__init__()
_lowerCAmelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_lowerCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase__ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase__ , lowercase__ , lowercase__ , depth=lowercase__ ) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , lowercase__ : Tensor , lowercase__ : bool = False , lowercase__ : bool = True ):
_lowerCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCAmelCase = hidden_states + (hidden_state,)
_lowerCAmelCase = stage_module(lowercase__ )
if output_hidden_states:
_lowerCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase__ , hidden_states=lowercase__ )
class lowerCamelCase__ ( UpperCAmelCase ):
UpperCamelCase__ =RegNetConfig
UpperCamelCase__ ="regnet"
UpperCamelCase__ ="pixel_values"
UpperCamelCase__ =True
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : List[Any] ):
if isinstance(lowercase__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : List[str] , lowercase__ : List[Any]=False ):
if isinstance(lowercase__ , lowercase__ ):
_lowerCAmelCase = value
_lowercase: Optional[Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowercase: str = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,UpperCAmelCase ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : List[str] , lowercase__ : int ):
super().__init__(lowercase__ )
_lowerCAmelCase = config
_lowerCAmelCase = RegNetEmbeddings(lowercase__ )
_lowerCAmelCase = RegNetEncoder(lowercase__ )
_lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : Tensor , lowercase__ : Optional[bool] = None , lowercase__ : Optional[bool] = None ):
_lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase = self.embedder(lowercase__ )
_lowerCAmelCase = self.encoder(
lowercase__ , output_hidden_states=lowercase__ , return_dict=lowercase__ )
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(lowercase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase__ , pooler_output=lowercase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,UpperCAmelCase ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : str , lowercase__ : Union[str, Any] ):
super().__init__(lowercase__ )
_lowerCAmelCase = config.num_labels
_lowerCAmelCase = RegNetModel(lowercase__ )
# classification head
_lowerCAmelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : Optional[torch.FloatTensor] = None , lowercase__ : Optional[torch.LongTensor] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[bool] = None , ):
_lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase = self.regnet(lowercase__ , output_hidden_states=lowercase__ , return_dict=lowercase__ )
_lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase = self.classifier(lowercase__ )
_lowerCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowerCAmelCase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowerCAmelCase = 'single_label_classification'
else:
_lowerCAmelCase = 'multi_label_classification'
if self.config.problem_type == "regression":
_lowerCAmelCase = MSELoss()
if self.num_labels == 1:
_lowerCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_lowerCAmelCase = loss_fct(lowercase__ , lowercase__ )
elif self.config.problem_type == "single_label_classification":
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowerCAmelCase = BCEWithLogitsLoss()
_lowerCAmelCase = loss_fct(lowercase__ , lowercase__ )
if not return_dict:
_lowerCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase__ , logits=lowercase__ , hidden_states=outputs.hidden_states )
| 192 | 0 |
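# A short usage sketch for the classification head defined above, reusing the
# checkpoint name from the docstring constants ("facebook/regnet-y-040").
# Network access and the exact checkpoint identifier are assumptions here; the
# random tensor stands in for a properly preprocessed image.
import torch
from transformers import RegNetForImageClassification

model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040").eval()
pixel_values = torch.randn(1, 3, 224, 224)  # placeholder for preprocessed pixels
with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits
print(model.config.id2label[int(logits.argmax(-1).item())])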
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = size if size is not None else {'''shortest_edge''': 256}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = resample
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ )-> np.ndarray:
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , )-> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
__UpperCamelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
| 700 |
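# The processor above applies, in order: resize to the shortest edge, center
# crop, rescale by 1/255, then normalize. A plain-numpy sketch of the last two
# steps using the IMAGENET_STANDARD statistics it imports (mean and std of 0.5
# per channel); only the function name below is invented for illustration.
import numpy as np

MEAN = np.array([0.5, 0.5, 0.5])  # IMAGENET_STANDARD_MEAN
STD = np.array([0.5, 0.5, 0.5])   # IMAGENET_STANDARD_STD

def rescale_and_normalize(image: np.ndarray) -> np.ndarray:
    """image: HxWx3 uint8 -> float32 roughly in [-1, 1]."""
    image = image.astype(np.float32) / 255.0  # do_rescale step
    return (image - MEAN) / STD               # do_normalize step

out = rescale_and_normalize(np.full((224, 224, 3), 255, dtype=np.uint8))
assert np.allclose(out, 1.0)  # white pixels map to 1.0 under 0.5/0.5 stats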
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
__UpperCamelCase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , )-> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
__UpperCamelCase = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
__UpperCamelCase = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__UpperCamelCase = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__UpperCamelCase = self.scheduler.step(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , use_clipped_model_output=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
__UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
| 451 | 0 |
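# A hedged usage sketch for the pipeline above. The checkpoint name
# "google/ddpm-cifar10-32" is an assumption borrowed from common diffusers
# examples; any DDPM-style UNet checkpoint should plug into the DDIM sampler
# the same way. eta=0.0 makes DDIM sampling deterministic for a fixed seed.
import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
generator = torch.Generator().manual_seed(0)
image = pipe(batch_size=1, generator=generator, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")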
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[int] ) -> None:
_lowercase = len(SCREAMING_SNAKE_CASE_ )
print("""The following activities are selected:""" )
# The first activity is always selected
_lowercase = 0
print(SCREAMING_SNAKE_CASE_ , end=""",""" )
# Consider rest of the activities
for j in range(SCREAMING_SNAKE_CASE_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(SCREAMING_SNAKE_CASE_ , end=""",""" )
_lowercase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
A : Union[str, Any] = [1, 3, 0, 5, 8, 5]
A : Dict = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 287 |
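# One step the greedy argument above depends on but does not enforce: the
# activities must be sorted by finish time before the scan. A small wrapper
# (a hypothetical helper, not part of the original) that sorts first and
# returns the selected indices:
def max_activities_unsorted(start: list[int], finish: list[int]) -> list[int]:
    """Return indices of a maximum set of mutually compatible activities."""
    if not finish:
        return []
    order = sorted(range(len(finish)), key=lambda j: finish[j])
    selected = [order[0]]
    for j in order[1:]:
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected

assert max_activities_unsorted([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]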
import datasets
A : Optional[int] = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
A : Optional[int] = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them low-resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
A : str = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def UpperCamelCase_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def UpperCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase ):
return {"accuracy": simple_accuracy(__UpperCamelCase , __UpperCamelCase )} | 287 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class UpperCAmelCase ( __snake_case ):
lowercase = None
lowercase = None
lowercase = None
lowercase = None
class UpperCAmelCase ( __snake_case ):
def __init__( self : Union[str, Any] , __magic_name__ : List[Any]=1 , __magic_name__ : List[Any]=0 , __magic_name__ : Optional[Any]=2 , __magic_name__ : List[Any]=5_1_2 , __magic_name__ : Union[str, Any]="cls" , __magic_name__ : Any=False , __magic_name__ : str=True , **__magic_name__ : Optional[int] , ):
"""simple docstring"""
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
UpperCamelCase = project_dim
UpperCamelCase = pooler_fn
UpperCamelCase = learn_encoder
UpperCamelCase = use_attention_mask
class UpperCAmelCase ( __snake_case ):
lowercase = [R"""pooler""", R"""logit_scale"""]
lowercase = [R"""position_ids""", R"""predictions.decoder.bias"""]
lowercase = """roberta"""
lowercase = RobertaSeriesConfig
def __init__( self : List[str] , __magic_name__ : Dict ):
"""simple docstring"""
super().__init__(__magic_name__ )
UpperCamelCase = XLMRobertaModel(__magic_name__ )
UpperCamelCase = nn.Linear(config.hidden_size , config.project_dim )
UpperCamelCase = getattr(__magic_name__ , """has_pre_transformation""" , __magic_name__ )
if self.has_pre_transformation:
UpperCamelCase = nn.Linear(config.hidden_size , config.project_dim )
UpperCamelCase = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def lowerCamelCase_ ( self : Optional[int] , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , ):
"""simple docstring"""
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.base_model(
input_ids=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , position_ids=__magic_name__ , head_mask=__magic_name__ , inputs_embeds=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , output_attentions=__magic_name__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__magic_name__ , )
if self.has_pre_transformation:
UpperCamelCase = outputs["""hidden_states"""][-2]
UpperCamelCase = self.pre_LN(__magic_name__ )
UpperCamelCase = self.transformation_pre(__magic_name__ )
return TransformationModelOutput(
projection_state=__magic_name__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
UpperCamelCase = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=__magic_name__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 181 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( __snake_case , unittest.TestCase ):
lowercase = XLMTokenizer
lowercase = False
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
UpperCamelCase = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCamelCase = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__magic_name__ ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
def lowerCamelCase_ ( self : Any , __magic_name__ : List[str] ):
"""simple docstring"""
UpperCamelCase = """lower newer"""
UpperCamelCase = """lower newer"""
return input_text, output_text
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCamelCase = """lower"""
UpperCamelCase = ["""low""", """er</w>"""]
UpperCamelCase = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
UpperCamelCase = tokens + ["""<unk>"""]
UpperCamelCase = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
UpperCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=__magic_name__ )
UpperCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__magic_name__ )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 181 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Dict =logging.get_logger(__name__)
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Optional[Any]:
_lowerCamelCase : Dict = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
_lowerCamelCase : Dict = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Any = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : int = sd.pop(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : int = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Dict = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : Any = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
_lowerCamelCase : Any = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
_lowerCamelCase : List[Any] = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
_lowerCamelCase : Optional[int] = value.shape[0]
assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` has the QKV weight separated in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = torch.split(SCREAMING_SNAKE_CASE_ , depth // 3 , dim=0 )
_lowerCamelCase : Union[str, Any] = q
_lowerCamelCase : str = k
_lowerCamelCase : Any = v
del sd[key]
return sd
@torch.no_grad()
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) ->List[Any]:
_lowerCamelCase : List[Any] = load_checkpoint(SCREAMING_SNAKE_CASE_ )
if config is not None:
_lowerCamelCase : Tuple = OPTConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
_lowerCamelCase : Optional[int] = OPTConfig()
_lowerCamelCase : List[Any] = OPTModel(SCREAMING_SNAKE_CASE_ ).half().eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check results
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
SCREAMING_SNAKE_CASE__ : Any =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
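# The core transformation above is splitting a fused QKV projection into three
# separate matrices with torch.split. A self-contained sketch of that step
# (the toy weight is laid out as plain Q,K,V; see the comment in the script
# about metaseq's K,V,Q storage order for real checkpoints):
import torch

hidden = 8
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
assert qkv_weight.shape[0] % 3 == 0
q, k, v = torch.split(qkv_weight, qkv_weight.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (hidden, hidden)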
| 434 | """simple docstring"""
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->str:
return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] )
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE_ ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 434 | 1 |
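# A quick round trip with the standard library's equivalent of the two
# functions above (both defs share an obfuscated name here, so base64's
# b16encode/b16decode are the unambiguous way to demonstrate the behavior):
import base64

encoded = base64.b16encode(b"Hello World!")
assert encoded == b"48656C6C6F20576F726C6421"
assert base64.b16decode(encoded) == b"Hello World!"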
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class a :
def __init__( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Union[str, Any]=7 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Any=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=5 , lowerCamelCase_ : Any=4 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : Dict=5_12 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : Dict=0.02 , lowerCamelCase_ : Dict=3 , lowerCamelCase_ : Dict=4 , lowerCamelCase_ : int=None , ) -> Dict:
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def lowerCAmelCase_ ( self : Optional[int] ) -> List[str]:
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Optional[int] ) -> int:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , use_stable_embedding=__A , )
def lowerCAmelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] ) -> Optional[Any]:
__a = OpenLlamaModel(config=__A )
model.to(__A )
model.eval()
__a = model(__A , attention_mask=__A )
__a = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , ) -> int:
__a = True
__a = OpenLlamaModel(__A )
model.to(__A )
model.eval()
__a = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )
__a = model(
__A , attention_mask=__A , encoder_hidden_states=__A , )
__a = model(__A , attention_mask=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] , ) -> Tuple:
__a = OpenLlamaForCausalLM(config=__A )
model.to(__A )
model.eval()
__a = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple , ) -> Union[str, Any]:
__a = True
__a = True
__a = OpenLlamaForCausalLM(config=__A )
model.to(__A )
model.eval()
# first forward pass
__a = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , use_cache=__A , )
__a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , output_hidden_states=__A , )["""hidden_states"""][0]
__a = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , past_key_values=__A , output_hidden_states=__A , )["""hidden_states"""][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1E-3 ) )
def lowerCAmelCase_ ( self : List[Any] ) -> List[str]:
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
A_ : Dict = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
A_ : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
A_ : str = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : List[Any] = False
A_ : Optional[Any] = False
def lowerCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
__a = OpenLlamaModelTester(self )
__a = ConfigTester(self , config_class=__A , hidden_size=37 )
def lowerCAmelCase_ ( self : List[Any] ) -> str:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : List[Any] ) -> List[str]:
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCAmelCase_ ( self : Optional[Any] ) -> List[str]:
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*__A )
def lowerCAmelCase_ ( self : List[Any] ) -> Dict:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = input_dict["""input_ids"""]
__a = input_ids.ne(1 ).to(__A )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = OpenLlamaForSequenceClassification(__A )
model.to(__A )
model.eval()
__a = model(__A , attention_mask=__A , labels=__A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self : Dict ) -> Dict:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = """single_label_classification"""
__a = input_dict["""input_ids"""]
__a = input_ids.ne(1 ).to(__A )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = OpenLlamaForSequenceClassification(__A )
model.to(__A )
model.eval()
__a = model(__A , attention_mask=__A , labels=__A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self : Any ) -> int:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = """multi_label_classification"""
__a = input_dict["""input_ids"""]
__a = input_ids.ne(1 ).to(__A )
__a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__a = OpenLlamaForSequenceClassification(__A )
model.to(__A )
model.eval()
__a = model(__A , attention_mask=__A , labels=__A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def lowerCAmelCase_ ( self : Any ) -> List[str]:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowerCAmelCase_ ( self : Tuple , lowerCamelCase_ : str ) -> Tuple:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = ids_tensor([1, 10] , config.vocab_size )
__a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__a = OpenLlamaModel(__A )
original_model.to(__A )
original_model.eval()
__a = original_model(__A ).last_hidden_state
__a = original_model(__A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__a = {"""type""": scaling_type, """factor""": 10.0}
__a = OpenLlamaModel(__A )
scaled_model.to(__A )
scaled_model.eval()
__a = scaled_model(__A ).last_hidden_state
__a = scaled_model(__A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__A , __A , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__A , __A , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__A , __A , atol=1E-5 ) )
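# What the "linear" case of the parameterized scaling test above does to the
# rotary embeddings, sketched directly: position ids are divided by the factor
# before the sin/cos angles are computed, stretching the usable context. The
# function below illustrates the mechanism and is not the model's own code.
import torch

def rope_angles(positions: torch.Tensor, dim: int = 8, base: float = 10000.0,
                scaling_factor: float = 1.0) -> torch.Tensor:
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    t = positions.float() / scaling_factor  # the linear scaling happens here
    return torch.outer(t, inv_freq)

short = rope_angles(torch.arange(4))
scaled = rope_angles(torch.arange(4), scaling_factor=10.0)
assert torch.allclose(scaled * 10.0, short)  # angles shrink by the factor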
| 715 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""DeiTFeatureExtractor"""]
__A = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 173 | 0 |