import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
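# For example (numbers purely illustrative): at the default 16 kHz rate a 30 s clip
# holds 480_000 samples, so `random_subsample(wav, max_length=20.0)` returns a random
# contiguous window of 320_000 samples, while clips already shorter than 20 s are
# returned unchanged.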
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label",
        metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    main()
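# A typical invocation, with a hypothetical dataset and output directory shown purely
# for illustration:
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval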
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """
    Output of the VQModel encoding method.
    """

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
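# A minimal usage sketch (assuming the public `diffusers` package exposes this class
# as `VQModel`; with the defaults above the spatial size is left unchanged):
#
#   from diffusers import VQModel
#   vq = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   reconstruction = vq(x).sample  # encode -> quantize -> decode round trip
#   assert reconstruction.shape == x.shape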
import torch
def main():
    """Print how many CUDA GPUs are visible to this process."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
    main()
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluate an expression in Reverse Polish (postfix) notation.
    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
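# Note on the division branch above: Python's `a // b` floors toward negative infinity,
# so when the operands have opposite signs and do not divide evenly, adding 1 truncates
# toward zero instead (e.g. a = -7, b = 2 gives -7 // 2 + 1 == -3 rather than -4).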
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class Bifid:
    """
    Bifid cipher based on the 5x5 Polybius square above ("j" shares a cell with "i").
    """

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """
        Return the pair of (row, column) numbers, both 1-based, that represents
        the given letter in the Polybius square.
        """
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """
        Return the letter corresponding to the 1-based position [index1, index2]
        in the Polybius square.
        """
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """
        Return the encoded version of message according to the Bifid cipher.
        """
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """
        Return the decoded version of message according to the Bifid cipher.
        """
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
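# A quick sanity check of the round trip, hand-traced on the square above:
#   Bifid().encode("test") == "qtuo"
#   Bifid().decode("qtuo") == "test"
# ("j" is folded into "i" before encoding, as is usual for a 5x5 Polybius square.)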
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 ,len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_UpperCamelCase : Optional[Any] = grid[0]
for row_n in range(1 ,len(lowercase_ ) ):
_UpperCamelCase : int = grid[row_n]
_UpperCamelCase : List[Any] = fill_row(lowercase_ ,lowercase_ )
_UpperCamelCase : List[str] = grid[row_n]
return grid[-1][-1]
def lowercase__ ( lowercase_ ,lowercase_ ) -> list:
"""simple docstring"""
current_row[0] += row_above[0]
for cell_n in range(1 ,len(lowercase_ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] )
return current_row
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """
    Evaluate how similar the item is to the target by counting each char
    in the right position.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
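# For example, with parents "aaaa" and "bbbb" and a random slice point of 2,
# crossover returns the pair ("aabb", "bbaa").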
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the genes list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate the new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match for the target string is found."""
    # Verify if N_POPULATION is bigger than N_SELECTED.
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
import argparse
import os
import re
import packaging.version
__a = """examples/"""
__a = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
__a = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
__a = """README.md"""
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
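# For instance, with pattern="init" and version="0.16.0" (version number purely
# illustrative), a line such as
#   __version__ = "0.16.0.dev0"
# in the target file is rewritten to
#   __version__ = "0.16.0"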
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
__a = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
def fibonacci(n: int) -> int:
    """Computes the Fibonacci number at position n by building the sequence iteratively."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Returns the index of the first Fibonacci number whose decimal representation has n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)
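# Worked example: fibonacci(12) == 144 is the first Fibonacci number with 3 digits,
# so fibonacci_digits_index(3) == 12 and solution(3) == 12. With the default n=1000
# this solves Project Euler problem 25, whose answer is index 4782.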
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
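# Perplexity is the exponential of the mean token-level cross-entropy, so a mean loss
# of 1.0 corresponds to a perplexity of about 2.72. The `loss.repeat(args.batch_size)`
# above duplicates the scalar loss once per batch element so that `accelerator.gather`
# collects tensors of a consistent size from every process before averaging.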
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
    rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
    rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
    rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # shortcut
            if layer_idx == 0:
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var"))
            # 3 convs
            for i in range(3):
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i + 1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var"))
    # fmt: on

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
                f"decoder.layers.{i}.encoder_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
                f"decoder.layers.{i}.encoder_attn.out_proj.bias",
            )
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )

    return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
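# The fused `in_proj_weight` matrices above have shape (3 * hidden_size, hidden_size);
# DETR's hidden size is 256, so rows [0:256] hold the query projection, rows [256:512]
# the key projection, and the last 256 rows the value projection.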
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load default config
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    image_format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=image_format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
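
# Example invocation (illustrative; the script filename below is hypothetical):
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50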
| 320 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, based on SentencePiece with a fairseq-style special-token layout."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 320 | 1 |
"""simple docstring"""
def binary_exponentiation(a, n, mod):
    # Computes (a ** n) % mod using O(log n) multiplications.
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
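

# Added sketch (illustrative, not in the original file): by Fermat's little theorem,
# a^(p-2) mod p is the modular inverse of a for prime p, so binary_exponentiation
# doubles as an O(log p) inverse helper. The name `mod_inverse` is hypothetical.
def mod_inverse(a, p):
    return binary_exponentiation(a, p - 2, p)


assert (10 * mod_inverse(10, 701)) % 701 == 1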
# a prime number
p = 701

a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 549 |
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 549 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 93 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    number = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = number // 2
    return int(factorial(number) / (factorial(k) * factorial(number - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
| 347 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
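

# Added usage sketch (illustrative): the defaults mirror bert-base-uncased, and any
# field can be overridden at construction time.
_demo_config = BertConfig(num_hidden_layers=6, hidden_size=384)
print(_demo_config.model_type, _demo_config.num_hidden_layers, _demo_config.hidden_size)  # bert 6 384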
| 207 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 207 | 1 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
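

# Added sanity check (illustrative helper, not in the original file): solution counts
# for small boards match the classic sequence 1, 0, 0, 2, 10, 4.
def count_solutions(n: int) -> int:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    return len(boards)


assert [count_solutions(i) for i in range(1, 7)] == [1, 0, 0, 2, 10, 4]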
| 550 |
from ..utils import DummyObject, requires_backends
# Placeholder class names: the concrete Flax pipeline names are not recoverable from
# this file, so `A__` is kept for each dummy class.
class A__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class A__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class A__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class A__(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 550 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 297 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX algorithm for the minimum vertex cover problem."""
    # queue used to store nodes and their rank
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
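

# Added check (illustrative): for the sample graph above, the greedy strategy picks
# {0, 1, 2, 4} — every edge has at least one endpoint in that set.
assert greedy_min_vertex_cover({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}) == {0, 1, 2, 4}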
| 297 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):  # placeholder name; the original class name is not recoverable
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 432 |
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput

BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects bits from -1 to 1, outputs image tensor from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
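

# Added round-trip check (illustrative): bits_to_decimal inverts decimal_to_bits up to
# the 8-bit quantization introduced by the (x * 255).int() step.
_x = torch.rand(1, 3, 4, 4)
_x_round_trip = bits_to_decimal(decimal_to_bits(_x))
assert torch.allclose(_x_round_trip, (_x * 255).int().float() / 255)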
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that clamps "predicted x_0" to the bit scale instead of [-1, 1]."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step that clamps "predicted x_0" to the bit scale instead of [-1, 1]."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one

    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ) -> None:
        super().__init__()

        self.bit_scale = bit_scale
        # Patch the scheduler with the bit-aware step function. `__get__` binds the
        # function so `self` inside it refers to the scheduler instance; the scheduler
        # also needs a `bit_scale` attribute because the step functions clamp with it.
        scheduler.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler)
            if isinstance(scheduler, DDIMScheduler)
            else ddpm_bit_scheduler_step.__get__(scheduler)
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
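

# Added usage sketch (illustrative; the checkpoint is hypothetical — a UNet trained on
# the "analog bits" representation is required for meaningful samples):
#   unet = UNet2DConditionModel(...)          # trained to denoise bit-encoded images
#   scheduler = DDIMScheduler()
#   pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)
#   image = pipe(height=64, width=64, num_inference_steps=10).images[0]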
| 147 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Picklable wrapper around a schedule's lr lambdas."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 315 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")
class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the generated sequence reaches `max_length` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
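

# Added runnable sketch (illustrative): criteria fire based only on the growing
# input_ids length; the scores tensor is unused by MaxLengthCriteria.
_ids = torch.ones((1, 12), dtype=torch.long)
_scores = torch.zeros((1, 10))
_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
assert _criteria(_ids, _scores)  # 12 >= 10, so generation would stop here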
| 315 | 1 |
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
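

# Added usage sketch: O(1) range-sum queries after O(n) preprocessing.
ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(0, 3) == 10
assert ps.get_sum(1, 2) == 5
assert ps.contains_sum(7)  # the contiguous slice [3, 4] sums to 7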
| 608 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param(
            "concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim)
        )
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 608 | 1 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple ) -> Dict:
'''simple docstring'''
lowercase = Mock()
lowercase = conn, Mock()
lowercase = iter([1, None] )
lowercase = lambda lowerCAmelCase__ : next(__lowerCAmelCase )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=__lowerCAmelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 718 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        """simple docstring"""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        """simple docstring"""
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
return common_inputs
    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
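# --- Hedged example (not part of the original file) -------------------------
# A minimal, self-contained sketch of the padding trick used above when the
# encoder and decoder have different layer counts: the first min(n_enc, n_dec)
# entries get 4-tuples (decoder self-attn k/v plus cross-attn k/v) and the
# remaining layers on the deeper side get 2-tuples. Function name and shapes
# are illustrative assumptions, not the real OnnxConfig API.
import torch


def build_dummy_past(batch, heads, enc_len, dec_len, head_dim, n_enc, n_dec):
    encoder_shape = (batch, heads, enc_len, head_dim)
    decoder_shape = (batch, heads, dec_len, head_dim)
    past = []
    min_layers = min(n_enc, n_dec)
    for _ in range(min_layers):
        # decoder self-attention k/v use the decoder shape,
        # cross-attention k/v use the encoder shape
        past.append(
            (
                torch.zeros(decoder_shape),
                torch.zeros(decoder_shape),
                torch.zeros(encoder_shape),
                torch.zeros(encoder_shape),
            )
        )
    remaining_shape = encoder_shape if n_enc > n_dec else decoder_shape
    for _ in range(min_layers, max(n_enc, n_dec)):
        past.append((torch.zeros(remaining_shape), torch.zeros(remaining_shape)))
    return past


# e.g. build_dummy_past(2, 4, 8, 5, 16, n_enc=6, n_dec=4) yields 6 entries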
| 197 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")
    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")
    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")
    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)
        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)
    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    return weight_dict
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__lowerCAmelCase : List[str] =parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
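# --- Hedged example (not part of the original script) -----------------------
# Toy illustration of the regex-driven key renaming the converter performs:
# match an old checkpoint key, pull out its group indices, and rebuild the new
# module path. The key below is made up for the demo; the original script
# additionally uses `.sub()` with a replacement pattern, so only the technique
# is mirrored here.
import re as _demo_re

_DEMO_RE_CONV_IN = _demo_re.compile(
    r"encoders\.(\d+)\.level_blocks\.(\d+)\.model\.(\d+)\.(\d+)\.(bias|weight)"
)


def demo_rename(old_key: str) -> str:
    match = _DEMO_RE_CONV_IN.fullmatch(old_key)
    if match is None:
        return old_key
    groups = match.groups()
    block_index = int(groups[2]) * 2 + int(groups[3])
    return f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"


print(demo_rename("encoders.0.level_blocks.1.model.2.1.weight"))
# -> encoders.0.level_blocks.1.downsample_block.5.weight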
| 696 |
import os
import sys
import unittest
__lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers')
__lowerCAmelCase : Optional[Any] ='\n{0} = None\n'
__lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class CheckDummiesTester(unittest.TestCase):
'''simple docstring'''
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)
        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")
        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")
        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
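# --- Hedged example (not part of the original test) -------------------------
# Minimal standalone sketch of the DummyObject pattern the generated dummy
# files rely on: any non-underscore attribute access raises until the backend
# is available. This is a simplified stand-in, not transformers' actual
# implementation.
class DemoDummyObject(type):
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        raise ImportError(f"{cls.__name__} requires the torch backend.")


class DemoFakeClass(metaclass=DemoDummyObject):
    _backends = ["torch"]


try:
    DemoFakeClass.some_method
except ImportError as err:
    print(err)  # DemoFakeClass requires the torch backend.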
| 696 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
return TaConfig.from_pretrained('''google/umt5-base''' )
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
snake_case__ = True
snake_case__ = False
snake_case__ = False
snake_case__ = True
snake_case__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
snake_case__ = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )
            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
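# --- Hedged example (not part of the original test) -------------------------
# The cache test above compares one random slice of the output computed with
# and without past_key_values. A tiny self-contained version of that
# comparison pattern, using stand-in tensors instead of a real model:
import torch as _demo_torch


def caches_agree(output_from_no_past, output_from_past, atol=1e-3):
    # compare the last step of the full forward pass against the single
    # cached step, at one random position of the hidden dimension
    random_slice_idx = _demo_torch.randint(output_from_past.shape[-1], (1,)).item()
    no_past_slice = output_from_no_past[:, -1, random_slice_idx]
    past_slice = output_from_past[:, 0, random_slice_idx]
    return _demo_torch.allclose(no_past_slice, past_slice, atol=atol)


_full = _demo_torch.randn(2, 7, 32)
_cached_step = _full[:, -1:, :].clone()
assert caches_agree(_full, _cached_step)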
| 705 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)
    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
UpperCAmelCase = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
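# --- Hedged example (not part of the original script) -----------------------
# Self-contained illustration of splitting a fused qkv projection into the
# separate query/key/value weights, as read_in_q_k_v does above. Sizes are
# arbitrary for the demo.
import torch as _demo_torch

_hidden_size = 8
_in_proj_weight = _demo_torch.arange(
    3 * _hidden_size * _hidden_size, dtype=_demo_torch.float32
).reshape(3 * _hidden_size, _hidden_size)
_query_w = _in_proj_weight[:_hidden_size, :]
_key_w = _in_proj_weight[_hidden_size : _hidden_size * 2, :]
_value_w = _in_proj_weight[-_hidden_size:, :]
assert _query_w.shape == _key_w.shape == _value_w.shape == (_hidden_size, _hidden_size)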
| 342 | 0 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
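# --- Hedged example (not part of the original test) -------------------------
# What the toy vocabulary above encodes: "lower" splits into the learned
# symbols "low" + "er</w>". A minimal greedy longest-match segmentation over
# that vocabulary -- a sketch of the idea only, not BioGptTokenizer's actual
# merge-rank BPE algorithm:
def greedy_bpe(word, vocab):
    word = word + "</w>"
    tokens, start = [], 0
    while start < len(word):
        for end in range(len(word), start, -1):  # longest match first
            piece = word[start:end]
            if piece in vocab:
                tokens.append(piece)
                start = end
                break
        else:
            tokens.append("<unk>")
            start += 1
    return tokens


_toy_vocab = {"low", "er</w>", "lo", "w", "e", "r", "l", "o"}
print(greedy_bpe("lower", _toy_vocab))  # ['low', 'er</w>']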
| 598 |
def solution(n: int = 1_000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to
    contain n digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
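# --- Hedged example (not part of the original solution) ---------------------
# Equivalent digit counting without iterating over the string: len(str(f))
# gives the number of decimal digits directly, so the inner loop above can be
# replaced by a single comparison.
def solution_len(n: int = 1_000) -> int:
    fa, fb, index = 1, 1, 2
    while len(str(fb)) < n:
        fa, fb = fb, fa + fb
        index += 1
    return index


assert solution_len(3) == 12  # 144 is the first 3-digit Fibonacci number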
| 598 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "unispeech"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
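# --- Hedged example (not part of the original config) -----------------------
# The property above multiplies the conv strides together: with the default
# strides (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples the raw
# waveform by a factor of 320, i.e. 16 kHz audio -> 50 feature frames per
# second.
import functools as _demo_functools
import operator as _demo_operator

_conv_stride = (5, 2, 2, 2, 2, 2, 2)
_ratio = _demo_functools.reduce(_demo_operator.mul, _conv_stride, 1)
assert _ratio == 320
print(16_000 / _ratio)  # 50.0 frames of features per second of audio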
| 452 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray_img = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray_img, 5, sigma=0.9).all()


def test_convolve_filter():
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray_img, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray_img, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray_img)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
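# --- Hedged example (not part of the original tests) ------------------------
# The local binary pattern tested above compares a pixel with its 8
# neighbours and packs the comparisons into one byte. A tiny pure-NumPy
# version of that idea for a single interior pixel; the clockwise neighbour
# order used here is an assumption of this sketch:
def demo_lbp_value(image: np.ndarray, x: int, y: int) -> int:
    center = image[x, y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        if image[x + dx, y + dy] >= center:
            value |= 1 << bit
    return value


_demo_img = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]], dtype=np.uint8)
print(demo_lbp_value(_demo_img, 1, 1))  # neighbours >= 50 set bits 3..6 -> 120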
| 452 | 1 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """simple docstring"""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name():
    """simple docstring"""
    return __name__.split(".")[0]


def _get_library_root_logger():
    """simple docstring"""
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger():
    """simple docstring"""
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger():
    """simple docstring"""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    """simple docstring"""
    return log_levels
def get_logger(name: Optional[str] = None ):
    """simple docstring"""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def get_verbosity():
    """simple docstring"""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int ):
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info():
    """simple docstring"""
    return set_verbosity(INFO )
def set_verbosity_warning():
    """simple docstring"""
    return set_verbosity(WARNING )
def set_verbosity_debug():
    """simple docstring"""
    return set_verbosity(DEBUG )
def set_verbosity_error():
    """simple docstring"""
    return set_verbosity(ERROR )
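# --- Usage sketch (illustrative, not part of the module) ---
# Typical use of the helpers above: fetch a namespaced logger, then raise or lower
# the library-wide verbosity.
#
#   from transformers.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger("transformers")
#   logger.info("visible at INFO level")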
def disable_default_handler():
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler():
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler(handler: logging.Handler ):
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )
def remove_handler(handler: logging.Handler ):
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def disable_propagation():
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation():
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format():
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
        handler.setFormatter(formatter )
def reset_format():
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice(self , *args , **kwargs ):
    """simple docstring"""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once(self , *args , **kwargs ):
    """simple docstring"""
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    def __init__(self , *args , **kwargs ):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self ):
        return iter(self._iterator )
    def __getattr__(self , name ):
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self ):
        return self
    def __exit__(self , type_ , value , traceback ):
        return
class _tqdm_cls:
    def __call__(self , *args , **kwargs ):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock(self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock(self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """simple docstring"""
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bars() -> None:
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars() -> None:
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 44 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """simple docstring"""
    data: int
    next_node: Node | None
class SortedLinkedList:
    """simple docstring"""
    def __init__(self , ints: Iterable[int] ) -> None:
        '''simple docstring'''
        self.head: Node | None = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__(self ) -> Iterator[int]:
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self ) -> int:
        '''simple docstring'''
        return sum(1 for _ in self )
    def __str__(self ) -> str:
        '''simple docstring'''
        return " -> ".join([str(node ) for node in self] )
def merge_lists(sll_one: SortedLinkedList , sll_two: SortedLinkedList ) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
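    # Usage note (illustrative): SortedLinkedList re-sorts its input in __init__,
    # so merge_lists is simply "concatenate, then rebuild":
    #   list(merge_lists(SSL([3, 1]), SSL([2, 0]))) == [0, 1, 2, 3]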
| 590 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def ta_base_tokenizer(self ):
        '''simple docstring'''
        return ByT5Tokenizer.from_pretrained("google/byt5-small" )
    def get_tokenizer(self , **kwargs ):
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence(self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        '''simple docstring'''
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(r"^[ a-zA-Z]+$" , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def test_eos_treatment(self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""] )
        self.assertListEqual(batch_with_eos_added["input_ids"] , batch_without_eos_added["input_ids"] )
    def test_multibytes_char(self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text )
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , "Unicode €.</s>" )
        encoded = tokenizer("e è é ê ë" )
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "e è é ê ë</s>" )
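        # Note: ByT5 ids are plain UTF-8 byte values offset by 3 (pad/eos/unk take
        # ids 0-2): "U" is byte 85 -> id 88, and the euro sign spans three UTF-8
        # bytes (0xE2 0x82 0xAC -> ids 229, 133, 175), matching the lists above.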
    def test_prepare_batch_integration(self ):
        '''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )
def _A ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.ta_base_tokenizer
lowerCAmelCase__ : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowerCAmelCase__ : Dict = tokenizer(a__ , padding=a__ , return_tensors=a__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , a__ )
self.assertIn("attention_mask" , a__ )
self.assertNotIn("decoder_input_ids" , a__ )
self.assertNotIn("decoder_attention_mask" , a__ )
def _A ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.ta_base_tokenizer
lowerCAmelCase__ : Optional[int] = [
"Summary of the text.",
"Another summary.",
]
lowerCAmelCase__ : str = tokenizer(
text_target=a__ , max_length=32 , padding="max_length" , truncation=a__ , return_tensors=a__ )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _A ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.ta_base_tokenizer
lowerCAmelCase__ : List[Any] = ["A long paragraph for summarization. </s>"]
lowerCAmelCase__ : Tuple = ["Summary of the text. </s>"]
# fmt: off
lowerCAmelCase__ : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowerCAmelCase__ : Optional[int] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowerCAmelCase__ : List[Any] = tokenizer(a__ , text_target=a__ )
self.assertEqual(a__ , batch["input_ids"][0] )
self.assertEqual(a__ , batch["labels"][0] )
def _A ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase__ : int = " He is very happy, UNwant\u00E9d,running"
lowerCAmelCase__ : List[Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
lowerCAmelCase__ : Tuple = tokenizer.__class__.from_pretrained(a__ )
lowerCAmelCase__ : Tuple = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
shutil.rmtree(a__ )
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ : Tuple = tempfile.mkdtemp()
lowerCAmelCase__ : Any = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowerCAmelCase__ : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowerCAmelCase__ : str = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
lowerCAmelCase__ : Optional[int] = tokenizer.__class__.from_pretrained(a__ )
lowerCAmelCase__ : Any = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase__ : str = tokenizer.__class__.from_pretrained(a__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a__ )
def _A ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
with open(os.path.join(a__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowerCAmelCase__ : Any = json.load(a__ )
with open(os.path.join(a__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowerCAmelCase__ : Optional[Any] = json.load(a__ )
lowerCAmelCase__ : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
lowerCAmelCase__ : Any = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowerCAmelCase__ : Dict = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(a__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a__ , a__ )
with open(os.path.join(a__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a__ , a__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase__ : str = tokenizer_class.from_pretrained(
a__ , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase__ : Union[str, Any] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=a__ )]
lowerCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained(
a__ , additional_special_tokens=a__ , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def _A ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
lowerCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained(a__ )
self.assertTrue(tokenizer.decode([255] ) == "" )
def _A ( self : Optional[int] ):
'''simple docstring'''
pass
def _A ( self : Optional[Any] ):
'''simple docstring'''
pass
def _A ( self : Tuple ):
'''simple docstring'''
pass
def _A ( self : Optional[Any] ):
'''simple docstring'''
pass
def _A ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.get_tokenizers(fast=a__ , do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase__ : Optional[int] = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowerCAmelCase__ : str = tokenizer.convert_tokens_to_string(a__ )
self.assertIsInstance(a__ , a__ )
def _A ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase__ : int = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : int = tokenizer.convert_ids_to_tokens(
a__ , skip_special_tokens=a__ )
for attr in attributes_list:
setattr(a__ , attr + "_id" , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + "_id" ) , a__ )
setattr(a__ , attr + "_id" , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + "_id" ) , a__ )
setattr(a__ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(a__ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(a__ , "additional_special_tokens_ids" ) , [] )
setattr(a__ , "additional_special_tokens_ids" , [token_id_to_test_setters] )
self.assertListEqual(getattr(a__ , "additional_special_tokens" ) , [token_to_test_setters] )
self.assertListEqual(getattr(a__ , "additional_special_tokens_ids" ) , [token_id_to_test_setters] )
| 721 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(merges ) )
    def get_input_output_texts(self , tokenizer ):
        '''simple docstring'''
        return "lower newer", "lower newer"
    def test_full_tokenizer(self ):
        '''simple docstring'''
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_padding(self , max_length=15 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
def _A ( self : Any ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest ):
    pass
| 568 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline ):
    '''simple docstring'''
    def __init__(self , unet , scheduler ) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__(self , batch_size: int = 1 , num_inference_steps: int = 100 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s: Optional[float] = None , return_dict: bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ' process.' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
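# --- Usage sketch (illustrative; the checkpoint id is an assumption) ---
#   from diffusers import DanceDiffusionPipeline
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audio = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios[0]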
| 70 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(UpperCamelCase )
class ObjectDetectionPipeline(Pipeline ):
    '''simple docstring'''
    def __init__(self , *args , **kwargs ) -> None:
        """simple docstring"""
        super().__init__(*args , **kwargs )
        if self.framework == "tf":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        requires_backends(self , 'vision' )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters(self , **kwargs ):
        """simple docstring"""
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return {}, {}, postprocess_kwargs
    def __call__(self , *args , **kwargs ) -> Union[Predictions, List[Prediction]]:
        """simple docstring"""
        return super().__call__(*args , **kwargs )
    def preprocess(self , image ):
        """simple docstring"""
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors='pt' )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
        inputs['target_size'] = target_size
return inputs
    def _forward(self , model_inputs ):
        """simple docstring"""
        target_size = model_inputs.pop('target_size' )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({'target_size': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
return model_outputs
    def postprocess(self , model_outputs , threshold=0.9 ):
        """simple docstring"""
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )
            scores, classes = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs['bbox'].squeeze(0 )]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
            ]
return annotation
    def _get_bounding_box(self , box: "torch.Tensor" ) -> Dict[str, int]:
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
return bbox
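# --- Usage sketch (illustrative) ---
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]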
| 70 | 1 |
"""simple docstring"""
def bead_sort(sequence: list ) -> list:
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
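# Worked example (illustrative): bead_sort([2, 4, 3])
#   pass 1: 4 > 3, so one "bead" falls from the upper rod -> [2, 3, 4]
#   pass 2: no adjacent surplus remains               -> [2, 3, 4] (sorted)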
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 621 |
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray , alpha: float ) -> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
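# Worked check (values rounded): with alpha = 0.3,
#   elu(2.3)  = 2.3                              (positive inputs pass through)
#   elu(-2.0) = 0.3 * (e**-2.0 - 1) ≈ -0.2594    (negatives are smoothly saturated)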
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 621 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase ):
    def __init__(self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values
    def create_and_check_model(self , config , pixel_values ):
        """simple docstring"""
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification(self , config , pixel_values ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp(self ) -> None:
        """simple docstring"""
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = model_class(__lowercase )
snake_case : Optional[int] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : List[Any] = [*signature.parameters.keys()]
snake_case : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case : Tuple = self._prepare_for_class(__lowercase , __lowercase )
snake_case : Optional[Any] = model_class(__lowercase )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
return model(pixel_values=__lowercase , **__lowercase )
with self.subTest("JIT Enabled" ):
snake_case : Optional[int] = model_jitted(**__lowercase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
snake_case : Union[str, Any] = model_jitted(**__lowercase ).to_tuple()
self.assertEqual(len(__lowercase ) , len(__lowercase ) )
for jitted_output, output in zip(__lowercase , __lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case : Dict = model_class_name.from_pretrained("google/vit-base-patch16-224" )
snake_case : Optional[int] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__lowercase )
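# Note: the JIT test in this class runs the same forward pass twice -- once
# compiled with jax.jit and once eagerly under jax.disable_jit() -- and compares
# output shapes, a cheap way to catch tracing-incompatible Python control flow.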
| 134 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()
device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
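# Note: providing a representative sample_input lets ipex.optimize trace and fuse
# the UNet for that input shape; the except branch above falls back to
# shape-agnostic optimization when tracing fails.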
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
| 522 | 0 |
'''simple docstring'''
def optimal_merge_pattern(files: list ) -> float:
    '''simple docstring'''
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
return optimal_merge_cost
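# Worked example (illustrative): files = [2, 3, 4]
#   merge 2 + 3 -> cost 5, files become [4, 5]
#   merge 4 + 5 -> cost 9, files become [9]
#   optimal merge cost = 5 + 9 = 14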
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
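# Lazy-import note (illustrative): _LazyModule replaces this module in sys.modules,
# so importing the package stays cheap and the heavy torch-backed classes are only
# imported on first attribute access, e.g. `mctct.MCTCTForCTC`.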
| 357 | 0 |
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin ):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , FutureWarning , )
| 443 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'''facebook/bart-base''': BartForConditionalGeneration}
tokenizer_dict = {'''facebook/bart-base''': BartTokenizer}
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
    parser.add_argument(
        "--validation_file" , type=str , default=None , help="A csv or a json file containing the validation data." )
    parser.add_argument(
        "--max_length" , type=int , default=5 , help="The maximum total input sequence length after tokenization." , )
    parser.add_argument(
        "--num_beams" , type=int , default=None , help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ) , )
    parser.add_argument(
        "--model_name_or_path" , type=str , help="Path to pretrained model or model identifier from huggingface.co/models." , required=True , )
    parser.add_argument(
        "--config_name" , type=str , default=None , help="Pretrained config name or path if not the same as model_name" , )
    parser.add_argument(
        "--device" , type=str , default="cpu" , help="Device where the model will be run" , )
    parser.add_argument("--output_file_path" , type=str , default=None , help="Where to store the final ONNX file." )
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name , device="cpu" ):
    """simple docstring"""
    huggingface_model = model_dict[model_name].from_pretrained(model_name ).to(device )
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name )
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model , tokenizer , onnx_file_path , num_beams , max_length ):
    """simple docstring"""
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device )
        summary_ids = model.generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            } , example_outputs=summary_ids , )
        logger.info("Model exported to {}".format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams ),
                "max_length": np.array(max_length ),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
        logger.info("Model outputs from torch and ONNX Runtime are similar." )
        logger.info("Success." )
def main():
    """simple docstring"""
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model, tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX" )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
main()
| 339 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig ):
    model_type = '''dpt'''
    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('Initializing the config with a `BiT` backbone.' )
                backbone_config = {
                    'global_padding': 'same',
                    'layer_type': 'bottleneck',
                    'depths': [3, 4, 9],
                    'out_features': ['stage1', 'stage2', 'stage3'],
                    'embedding_dynamic_padding': True,
                }
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info('Initializing the config with a `BiT` backbone.' )
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict(self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
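# --- Usage sketch (illustrative) ---
#   config = DPTConfig()                 # plain ViT backbone
#   hybrid = DPTConfig(is_hybrid=True)   # picks up the default BiT backbone above
#   hybrid.to_dict()["backbone_config"]["layer_type"]  # 'bottleneck'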
| 717 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__(self , images , text=None , text_pair=None , boxes=None , word_labels=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_token_type_ids=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['image'] = images
return encoded_inputs
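    # Note: with return_overflowing_tokens=True a single page can map to several
    # tokenizer windows; get_overflowing_images below duplicates the pixel values
    # via overflow_to_sample_mapping so every window keeps its source image.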
    def get_overflowing_images(self , images , overflow_to_sample_mapping ):
        """simple docstring"""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode(self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names(self ):
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class(self ):
        """simple docstring"""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor(self ):
        """simple docstring"""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
| 688 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    '''simple docstring'''

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing which elements should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )

    def encode(self , image: "Image" , label: str ):
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors="pt" )

    def forward(self , inputs ):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits

    def decode(self , outputs ):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0  # threshold the logits into a binary mask
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
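# Hypothetical usage sketch (the image path below is a placeholder, not from
# the original file):
# from PIL import Image
# tool = ImageSegmentationTool()
# mask = tool(image=Image.open("photo.png"), label="cat")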
| 55 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
    '''facebook/data2vec-audio-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
    model_type = """data2vec-audio"""
def __init__( self , UpperCAmelCase_=32 , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1e-5 , UpperCAmelCase_="gelu" , UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCAmelCase_=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_=False , UpperCAmelCase_=16 , UpperCAmelCase_=19 , UpperCAmelCase_=5 , UpperCAmelCase_=0.05 , UpperCAmelCase_=10 , UpperCAmelCase_=2 , UpperCAmelCase_=0.0 , UpperCAmelCase_=10 , UpperCAmelCase_=0 , UpperCAmelCase_="sum" , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=2_56 , UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 15_00) , UpperCAmelCase_=(5, 3, 3, 1, 1) , UpperCAmelCase_=(1, 2, 3, 1, 1) , UpperCAmelCase_=5_12 , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , UpperCAmelCase_=False , UpperCAmelCase_=3 , UpperCAmelCase_=2 , UpperCAmelCase_=3 , UpperCAmelCase_=None , **UpperCAmelCase_ , ):
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
snake_case_ = hidden_size
snake_case_ = feat_extract_activation
snake_case_ = list(UpperCAmelCase_ )
snake_case_ = list(UpperCAmelCase_ )
snake_case_ = list(UpperCAmelCase_ )
snake_case_ = conv_bias
snake_case_ = num_conv_pos_embeddings
snake_case_ = num_conv_pos_embedding_groups
snake_case_ = conv_pos_kernel_size
snake_case_ = len(self.conv_dim )
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = feat_proj_dropout
snake_case_ = final_dropout
snake_case_ = layerdrop
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = vocab_size
snake_case_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ = mask_time_prob
snake_case_ = mask_time_length
snake_case_ = mask_time_min_masks
snake_case_ = mask_feature_prob
snake_case_ = mask_feature_length
snake_case_ = mask_feature_min_masks
# ctc loss
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# adapter
snake_case_ = add_adapter
snake_case_ = adapter_kernel_size
snake_case_ = adapter_stride
snake_case_ = num_adapter_layers
snake_case_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ = list(UpperCAmelCase_ )
snake_case_ = list(UpperCAmelCase_ )
snake_case_ = list(UpperCAmelCase_ )
snake_case_ = xvector_output_dim
@property
def _lowercase ( self ):
return math.prod(self.conv_stride )
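# Worked check for the property above (a sketch; upstream this property is
# named `inputs_to_logits_ratio`): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), math.prod(...) == 5 * 2**6 == 320, i.e. every output
# frame of the feature encoder covers 320 raw audio samples (20 ms at 16 kHz).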
| 508 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """simple docstring"""
    parser = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''' )

    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )

    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
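# Illustrative invocation (not part of this module):
#   $ transformers-cli env
# dispatches to EnvironmentCommand via the subparser registered above;
# `args.func(args)` instantiates the command object and `service.run()`
# executes it.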
| 176 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = """pt"""
elif is_tf_available():
    FRAMEWORK = """tf"""
else:
    FRAMEWORK = """jax"""
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ByTaTokenizer
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE_ : Dict = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('''google/byt5-small''')
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : Tuple):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : List[Any]=False , lowercase_ : Tuple=20 , lowercase_ : List[str]=5):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = []
for i in range(len(lowercase_)):
try:
SCREAMING_SNAKE_CASE_ : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase_)
except UnicodeDecodeError:
pass
toks.append((i, tok))
SCREAMING_SNAKE_CASE_ : Dict = list(filter(lambda lowercase_: re.match(r'''^[ a-zA-Z]+$''' , t[1]) , lowercase_))
SCREAMING_SNAKE_CASE_ : Any = list(filter(lambda lowercase_: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase_) , lowercase_))
if max_length is not None and len(lowercase_) > max_length:
SCREAMING_SNAKE_CASE_ : List[Any] = toks[:max_length]
if min_length is not None and len(lowercase_) < min_length and len(lowercase_) > 0:
while len(lowercase_) < min_length:
SCREAMING_SNAKE_CASE_ : int = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_)
if " " not in output_txt and len(lowercase_) > 1:
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase_)
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase_)
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_ : List[str] = ''' ''' + output_txt
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
return output_txt, output_ids
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''])
SCREAMING_SNAKE_CASE_ : Any = tokenizer(['''hi''', '''I went to the gym''', ''''''])
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''])
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ : Any = '''Unicode €.'''
SCREAMING_SNAKE_CASE_ : Any = tokenizer(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , lowercase_)
# decoding
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(lowercase_)
self.assertEqual(lowercase_ , '''Unicode €.</s>''')
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer('''e è é ê ë''')
SCREAMING_SNAKE_CASE_ : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , lowercase_)
# decoding
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.decode(lowercase_)
self.assertEqual(lowercase_ , '''e è é ê ë</s>''')
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''')) , '''e è é ê ë</s>''')
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
SCREAMING_SNAKE_CASE_ : Dict = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(batch.input_ids.numpy()[0])
else:
SCREAMING_SNAKE_CASE_ : int = list(batch.input_ids.tolist()[0])
self.assertListEqual(lowercase_ , lowercase_)
self.assertEqual((2, 37) , batch.input_ids.shape)
self.assertEqual((2, 37) , batch.attention_mask.shape)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ : Any = tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_)
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowercase_)
self.assertIn('''attention_mask''' , lowercase_)
self.assertNotIn('''decoder_input_ids''' , lowercase_)
self.assertNotIn('''decoder_attention_mask''' , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ : List[str] = [
'''Summary of the text.''',
'''Another summary.''',
]
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(
text_target=lowercase_ , max_length=32 , padding='''max_length''' , truncation=lowercase_ , return_tensors=lowercase_)
self.assertEqual(32 , targets['''input_ids'''].shape[1])
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['''A long paragraph for summarization. </s>''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''Summary of the text. </s>''']
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_ : Tuple = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_ , text_target=lowercase_)
self.assertEqual(lowercase_ , batch['''input_ids'''][0])
self.assertEqual(lowercase_ , batch['''labels'''][0])
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
self.assertNotEqual(tokenizer.model_max_length , 42)
# Now let's start the test
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Optional[Any] = ''' He is very happy, UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
tokenizer.save_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = tokenizer.__class__.from_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = after_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
shutil.rmtree(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ : List[str] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Any = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''])
SCREAMING_SNAKE_CASE_ : str = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''')
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
tokenizer.save_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.__class__.from_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = after_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 42)
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.__class__.from_pretrained(lowercase_ , model_max_length=43)
self.assertEqual(tokenizer.model_max_length , 43)
shutil.rmtree(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase_)
with open(os.path.join(lowercase_ , '''special_tokens_map.json''') , encoding='''utf-8''') as json_file:
SCREAMING_SNAKE_CASE_ : List[Any] = json.load(lowercase_)
with open(os.path.join(lowercase_ , '''tokenizer_config.json''') , encoding='''utf-8''') as json_file:
SCREAMING_SNAKE_CASE_ : Tuple = json.load(lowercase_)
SCREAMING_SNAKE_CASE_ : str = [F'<extra_id_{i}>' for i in range(125)]
SCREAMING_SNAKE_CASE_ : Optional[int] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
SCREAMING_SNAKE_CASE_ : int = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowercase_ , '''special_tokens_map.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(lowercase_ , lowercase_)
with open(os.path.join(lowercase_ , '''tokenizer_config.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(lowercase_ , lowercase_)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_class.from_pretrained(
lowercase_ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''])) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_ : Union[str, Any] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowercase_)]
SCREAMING_SNAKE_CASE_ : int = tokenizer_class.from_pretrained(
lowercase_ , additional_special_tokens=lowercase_ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens)
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''])) , )
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : str = tokenizer_class.from_pretrained(lowercase_)
self.assertTrue(tokenizer.decode([255]) == '''''')
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizers(fast=lowercase_ , do_lower_case=lowercase_)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
SCREAMING_SNAKE_CASE_ : Any = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.convert_tokens_to_string(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
SCREAMING_SNAKE_CASE_ : int = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : str = tokenizer.convert_ids_to_tokens(
lowercase_ , skip_special_tokens=lowercase_)
for attr in attributes_list:
setattr(lowercase_ , attr + '''_id''' , lowercase_)
self.assertEqual(getattr(lowercase_ , lowercase_) , lowercase_)
self.assertEqual(getattr(lowercase_ , attr + '''_id''') , lowercase_)
setattr(lowercase_ , attr + '''_id''' , lowercase_)
self.assertEqual(getattr(lowercase_ , lowercase_) , lowercase_)
self.assertEqual(getattr(lowercase_ , attr + '''_id''') , lowercase_)
setattr(lowercase_ , '''additional_special_tokens_ids''' , [])
self.assertListEqual(getattr(lowercase_ , '''additional_special_tokens''') , [])
self.assertListEqual(getattr(lowercase_ , '''additional_special_tokens_ids''') , [])
setattr(lowercase_ , '''additional_special_tokens_ids''' , [token_id_to_test_setters])
self.assertListEqual(getattr(lowercase_ , '''additional_special_tokens''') , [token_to_test_setters])
self.assertListEqual(getattr(lowercase_ , '''additional_special_tokens_ids''') , [token_id_to_test_setters])
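# Worked check of the byte-level mapping exercised by the tests above — a
# sketch assuming ByT5's standard convention of three special ids
# (pad=0, </s>=1, unk=2), so every UTF-8 byte b maps to id b + 3:
def _expected_byte_ids(text):
    return [b + 3 for b in text.encode("utf-8")] + [1]  # trailing </s>

assert _expected_byte_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]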
| 176 | 1 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase_ = logging.getLogger(__name__)
def accuracy(out , labels ):
    """simple docstring"""
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset(dataset_path ):
    """simple docstring"""
    with open(dataset_path , encoding='''utf_8''' ) as f:
        f = csv.reader(f )
        output = []
        next(f ) # skip the first line
        for line in tqdm(f ):
            output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets(encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    """simple docstring"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
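# Shape sketch for the tensors built above: with n_batch stories and
# input_len == L, input_ids and lm_labels are (n_batch, 2, L) — one row per
# candidate ending — mc_token_ids is (n_batch, 2) and stores the position of
# the _classify_ token, and mc_labels is (n_batch,) with the gold ending index.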
def lowerCamelCase_ ( ) -> int:
"""simple docstring"""
snake_case_ : str = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_UpperCamelCase , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=_UpperCamelCase , default='''''' )
parser.add_argument('''--eval_dataset''' , type=_UpperCamelCase , default='''''' )
parser.add_argument('''--seed''' , type=_UpperCamelCase , default=42 )
parser.add_argument('''--num_train_epochs''' , type=_UpperCamelCase , default=3 )
parser.add_argument('''--train_batch_size''' , type=_UpperCamelCase , default=8 )
parser.add_argument('''--eval_batch_size''' , type=_UpperCamelCase , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=_UpperCamelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=_UpperCamelCase , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=_UpperCamelCase , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_UpperCamelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=_UpperCamelCase , default=6.25E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=_UpperCamelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=_UpperCamelCase , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=_UpperCamelCase , default=0.01 )
parser.add_argument('''--lm_coef''' , type=_UpperCamelCase , default=0.9 )
parser.add_argument('''--n_valid''' , type=_UpperCamelCase , default=374 )
parser.add_argument('''--server_ip''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
snake_case_ : List[Any] = parser.parse_args()
print(_UpperCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
snake_case_ : int = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case_ : str = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_UpperCamelCase , _UpperCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
snake_case_ : Dict = ['''_start_''', '''_delimiter_''', '''_classify_''']
snake_case_ : Any = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_UpperCamelCase )
snake_case_ : Tuple = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
snake_case_ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_UpperCamelCase ) )
model.to(_UpperCamelCase )
# Load and encode the datasets
def tokenize_and_encode(_UpperCamelCase ):
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_UpperCamelCase ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
return obj
return [tokenize_and_encode(_UpperCamelCase ) for o in obj]
logger.info('''Encoding dataset...''' )
snake_case_ : Tuple = load_rocstories_dataset(args.train_dataset )
snake_case_ : Dict = load_rocstories_dataset(args.eval_dataset )
snake_case_ : Optional[Any] = (train_dataset, eval_dataset)
snake_case_ : Union[str, Any] = tokenize_and_encode(_UpperCamelCase )
# Compute the max input length for the Transformer
snake_case_ : Dict = model.config.n_positions // 2 - 2
snake_case_ : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
snake_case_ : str = min(_UpperCamelCase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
snake_case_ : int = pre_process_datasets(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
snake_case_ , snake_case_ : str = tensor_datasets[0], tensor_datasets[1]
snake_case_ : List[str] = TensorDataset(*_UpperCamelCase )
snake_case_ : int = RandomSampler(_UpperCamelCase )
snake_case_ : Union[str, Any] = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=args.train_batch_size )
snake_case_ : Any = TensorDataset(*_UpperCamelCase )
snake_case_ : str = SequentialSampler(_UpperCamelCase )
snake_case_ : int = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
snake_case_ : Tuple = args.max_steps
snake_case_ : Union[str, Any] = args.max_steps // (len(_UpperCamelCase ) // args.gradient_accumulation_steps) + 1
else:
snake_case_ : List[Any] = len(_UpperCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
snake_case_ : Optional[Any] = list(model.named_parameters() )
snake_case_ : List[str] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
snake_case_ : str = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
snake_case_ : Optional[int] = AdamW(_UpperCamelCase , lr=args.learning_rate , eps=args.adam_epsilon )
snake_case_ : Optional[int] = get_linear_schedule_with_warmup(
_UpperCamelCase , num_warmup_steps=args.warmup_steps , num_training_steps=_UpperCamelCase )
if args.do_train:
snake_case_ , snake_case_ , snake_case_ : List[str] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
snake_case_ : Tuple = 0
snake_case_ : Union[str, Any] = 0
snake_case_ : Any = tqdm(_UpperCamelCase , desc='''Training''' )
for step, batch in enumerate(_UpperCamelCase ):
snake_case_ : Dict = tuple(t.to(_UpperCamelCase ) for t in batch )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : int = batch
snake_case_ : Union[str, Any] = model(_UpperCamelCase , mc_token_ids=_UpperCamelCase , lm_labels=_UpperCamelCase , mc_labels=_UpperCamelCase )
snake_case_ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
snake_case_ : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
snake_case_ : int = '''Training loss: {:.2e} lr: {:.2e}'''.format(_UpperCamelCase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
snake_case_ : List[Any] = model.module if hasattr(_UpperCamelCase , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
snake_case_ : Any = os.path.join(args.output_dir , _UpperCamelCase )
snake_case_ : Optional[Any] = os.path.join(args.output_dir , _UpperCamelCase )
torch.save(model_to_save.state_dict() , _UpperCamelCase )
model_to_save.config.to_json_file(_UpperCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
snake_case_ : int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
snake_case_ : str = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_UpperCamelCase )
if args.do_eval:
model.eval()
snake_case_ , snake_case_ : str = 0, 0
snake_case_ , snake_case_ : Any = 0, 0
for batch in tqdm(_UpperCamelCase , desc='''Evaluating''' ):
snake_case_ : Any = tuple(t.to(_UpperCamelCase ) for t in batch )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : int = batch
with torch.no_grad():
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = model(
_UpperCamelCase , mc_token_ids=_UpperCamelCase , lm_labels=_UpperCamelCase , mc_labels=_UpperCamelCase )
snake_case_ : Optional[Any] = mc_logits.detach().cpu().numpy()
snake_case_ : List[Any] = mc_labels.to('''cpu''' ).numpy()
snake_case_ : Any = accuracy(_UpperCamelCase , _UpperCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
snake_case_ : Any = eval_loss / nb_eval_steps
snake_case_ : Optional[int] = eval_accuracy / nb_eval_examples
snake_case_ : int = tr_loss / nb_tr_steps if args.do_train else None
snake_case_ : Optional[Any] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
snake_case_ : List[str] = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(_UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _UpperCamelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 60 |
'''simple docstring'''
def solution(limit: int = 28123 ) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
if __name__ == "__main__":
print(solution())
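# Sanity sketch: 12 is the smallest abundant number (1+2+3+4+6 = 16 > 12), so
# 24 = 12 + 12 is the smallest integer expressible as a sum of two abundant
# numbers, and every n < 24 therefore contributes to the returned total.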
| 69 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """simple docstring"""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser ):
        raise NotImplementedError()

    @abstractmethod
    def run(self ):
        raise NotImplementedError()
| 421 |
_snake_case : List[Any] = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_snake_case : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_snake_case : Dict = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_snake_case : Any = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_snake_case : Tuple = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_snake_case : Optional[int] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_snake_case : int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_snake_case : Optional[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 421 | 1 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float , kelvin: float , volume: float ) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float , kelvin: float , pressure: float ) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
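# Worked example (SI units, a sketch): 2 mol at 300 K in 0.25 m^3 gives
# pressure_of_gas_system(2.0, 300.0, 0.25) == 2 * 300 * 8.314462 / 0.25
# ≈ 19954.71 Pa; volume_of_gas_system inverts the same relation PV = nRT.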
| 80 |
def validate_initial_digits(credit_card_number: str ) -> bool:
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )


def luhn_validation(credit_card_number: str ) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str ) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters." )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f"{error_message} of its length." )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f"{error_message} of its first two digits." )
        return False
    if not luhn_validation(credit_card_number ):
        print(f"{error_message} it fails the Luhn check." )
        return False
    print(f"{credit_card_number} is a valid credit card number." )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
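# Worked Luhn pass for 4111111111111111 (sketch): doubling every second digit
# from the right doubles the leading 4 (-> 8) and seven of the 1s (-> 2 each),
# giving 8 + 7*2 + 8*1 = 30; since 30 % 10 == 0, the number validates.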
| 253 | 0 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__A : int = logging.getLogger(__name__)
def lowerCAmelCase_ ( a : List[str] , a : Dict , a : Any = None , a : List[Any] = None , a : Optional[int] = None , a : Tuple = None , a : Optional[int] = None , a : Union[str, Any] = False , ):
    load_in_8bit = bnb_quantization_config.load_in_8bit
    load_in_4bit = bnb_quantization_config.load_in_4bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
a__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
a__ = []
a__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE )
# compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
a__ = get_parameter_device(_SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
a__ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
# convert param to the right dtype
a__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
a__ = name.replace('.weight' , '' ).replace('.bias' , '' )
a__ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ):
param.to(_SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
a__ = replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
a__ = get_quantized_model_device_map(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
a__ = True
a__ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( a : Optional[int] , a : Dict , a : Dict=None , a : Union[str, Any]=None , a : Tuple=None ):
if device_map is None:
if torch.cuda.is_available():
a__ = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
a__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
a__ = {}
a__ = special_dtypes
a__ = no_split_module_classes
a__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
a__ = get_balanced_memory(
_SCREAMING_SNAKE_CASE , low_zero=(device_map == 'balanced_low_0') , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
a__ = max_memory
a__ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
a__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
a__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def lowerCAmelCase_ ( a : Optional[Any] , a : List[str] , a : Any=None , a : Optional[int]=None ):
if modules_to_not_convert is None:
a__ = []
a__ , a__ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def lowerCAmelCase_ ( a : Dict , a : Any , a : Dict=None , a : List[str]=None , ):
a__ = False
for name, module in model.named_children():
if current_key_name is None:
a__ = []
current_key_name.append(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
a__ = '.'.join(_SCREAMING_SNAKE_CASE )
a__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
a__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
if len(list(module.children() ) ) > 0:
a__ , a__ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowerCAmelCase_ ( a : Tuple ):
# Create a copy of the model
with init_empty_weights():
a__ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
a__ = find_tied_parameters(_SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
a__ = sum(_SCREAMING_SNAKE_CASE , [] )
a__ = len(_SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
a__ = False
if hasattr(_SCREAMING_SNAKE_CASE , 'base_model_prefix' ):
a__ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
a__ = list(model.named_children() )
a__ = [list_modules[-1][0]]
# add last module together with tied weights
a__ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
a__ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
a__ = ['.weight', '.bias']
a__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
a__ = name.replace(_SCREAMING_SNAKE_CASE , '' )
filtered_module_names.append(_SCREAMING_SNAKE_CASE )
return filtered_module_names
def lowerCAmelCase_ ( a : Tuple ):
for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
return True
return False
def lowerCAmelCase_ ( a : Dict ):
return next(parameter.parameters() ).device
def lowerCAmelCase_ ( a : Union[str, Any] , a : Any , a : int , a : Tuple , a : str , a : List[Any] , a : int ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE )
a__ = param_name
a__ = model
if "." in tensor_name:
a__ = tensor_name.split('.' )
for split in splits[:-1]:
a__ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
a__ = new_module
a__ = splits[-1]
# offload weights
a__ = False
offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , )
else:
offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace('weight' , 'SCB' ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'meta' , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
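# Hypothetical end-to-end sketch (the model object and checkpoint path are
# placeholders; the function names follow accelerate's public API):
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
# with init_empty_weights():
#     empty_model = MyModel()  # hypothetical architecture
# quantized = load_and_quantize_model(
#     empty_model,
#     BnbQuantizationConfig(load_in_8bit=True),
#     weights_location="path/to/checkpoint",
# )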
| 702 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a , b ):
    """simple docstring"""
    return np.linalg.norm(np.array(a ) - np.array(b ) )


def classifier(train_data , train_target , classes , point , k=5 ):
    """simple docstring"""
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
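# Worked intuition (sketch): euclidean_distance([0, 0], [3, 4]) == 5.0; the
# classifier ranks every training sample by this metric and lets the k = 5
# nearest neighbours vote on the label of [4.4, 3.1, 1.3, 1.4].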
| 126 | 0 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
a = "naver-clova-ix/donut-base"
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = DonutProcessor.from_pretrained(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {
"""name""": """John Doe""",
"""age""": """99""",
"""city""": """Atlanta""",
"""state""": """GA""",
"""zip""": """30301""",
"""phone""": """123-4567""",
"""nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
}
__SCREAMING_SNAKE_CASE = (
"""<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
"""<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
"""<s_nicknames><s_nickname>Johnny</s_nickname>"""
"""<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
)
__SCREAMING_SNAKE_CASE = self.processor.tokenajson(lowerCamelCase )
self.assertDictEqual(lowerCamelCase ,lowerCamelCase )
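# Note (sketch): token2json parses Donut's XML-like output back into a dict;
# the inverse serialization (json2token in the original Donut codebase) turns
# the dict above back into the <s_name>...</s_name> sequence being tested.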
| 109 |
"""simple docstring"""
alphabet_size = 2_56
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str , text: str ):
    """simple docstring"""
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
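# Rolling-hash sketch: hashes are base-256 polynomials with the leftmost
# character most significant, so sliding a length-3 window from "abc" to
# "bcd" computes
#   hash("bcd") = (hash("abc") - ord('a') * 256**2) * 256 + ord('d')  (mod 1_000_003)
# which is exactly the update performed in the loop above.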
def test_rabin_karp():
    """simple docstring"""
    # Test 1)
    pattern = """abc1abc12"""
    text1 = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    text2 = """alskfjaldsk23adsfabcabc"""
    assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
    # Test 2)
    pattern = """ABABX"""
    text = """ABABZABABYABABX"""
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = """AAAB"""
    text = """ABAAAAAB"""
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = """abcdabcy"""
    text = """abcxabcdabxabcdabcdabcy"""
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = """Lü"""
    text = """Lüsai"""
    assert rabin_karp(pattern , text )
    pattern = """Lue"""
    assert not rabin_karp(pattern , text )
    print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
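# A small sketch of the rolling-hash update the comment above points to:
# drop the leading character and append the next one in O(1) instead of
# rehashing the whole window. Constants mirror the snippet
# (alphabet_size=256, modulus=1000003); the window text is illustrative.
alphabet_size, modulus = 256, 1000003


def window_hash(s):
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h


text, p_len = "abcd", 3
modulus_power = pow(alphabet_size, p_len - 1, modulus)
h = window_hash(text[:p_len])  # hash of "abc"
rolled = ((h - ord(text[0]) * modulus_power) * alphabet_size + ord(text[p_len])) % modulus
assert rolled == window_hash(text[1 : 1 + p_len])  # hash of "bcd"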
| 609 | 0 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
__lowerCamelCase : Any = parse(importlib.metadata.version("""torch"""))
def A__ ( _a : Union[str, Version] , _a : str , _a : str ):
'''simple docstring'''
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}" )
snake_case__ : List[str] =STR_OPERATION_TO_FUNC[operation]
if isinstance(_a , _a ):
snake_case__ : Optional[int] =parse(importlib.metadata.version(_a ) )
return operation(_a , parse(_a ) )
def A__ ( _a : str , _a : str ):
'''simple docstring'''
return compare_versions(_a , _a , _a )
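# A minimal sketch of how such a version gate is typically used. The dict
# below stands in for STR_OPERATION_TO_FUNC, whose contents are not shown
# in this dump; `operator` supplies the standard comparison functions.
import operator

from packaging.version import parse

STR_OPERATION_TO_FUNC = {
    ">": operator.gt,
    ">=": operator.ge,
    "==": operator.eq,
    "!=": operator.ne,
    "<": operator.lt,
    "<=": operator.le,
}


def version_check(installed: str, operation: str, required: str) -> bool:
    # e.g. version_check("2.1.0", ">=", "1.13") -> True
    return STR_OPERATION_TO_FUNC[operation](parse(installed), parse(required))


assert version_check("2.1.0", ">=", "1.13")
assert not version_check("1.12.1", ">", "1.13")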
| 448 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def A__ ( _a : np.ndarray , _a : Union[int, Iterable[int]] , _a : bool , _a : int ):
'''simple docstring'''
def constraint_to_multiple_of(_a : Union[str, Any] , _a : List[str] , _a : str=0 , _a : Any=None ):
snake_case__ : Any =round(val / multiple ) * multiple
if max_val is not None and x > max_val:
snake_case__ : int =math.floor(val / multiple ) * multiple
if x < min_val:
snake_case__ : Dict =math.ceil(val / multiple ) * multiple
return x
snake_case__ : str =(output_size, output_size) if isinstance(_a , _a ) else output_size
snake_case__ , snake_case__ : Dict =get_image_size(_a )
snake_case__ , snake_case__ : int =output_size
# determine new height and width
snake_case__ : Tuple =output_height / input_height
snake_case__ : Optional[Any] =output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
snake_case__ : Optional[int] =scale_width
else:
# fit height
snake_case__ : Any =scale_height
snake_case__ : Optional[int] =constraint_to_multiple_of(scale_height * input_height , multiple=_a )
snake_case__ : str =constraint_to_multiple_of(scale_width * input_width , multiple=_a )
return (new_height, new_width)
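# A worked numeric sketch of the resize rule above: keep the aspect ratio by
# scaling as little as possible, then snap each side to a multiple of
# `ensure_multiple_of`. The numbers are illustrative.
input_h, input_w = 480, 640
output_h, output_w = 384, 384
multiple = 32

scale_h, scale_w = output_h / input_h, output_w / input_w  # 0.8, 0.6
# |1 - scale_w| = 0.4 is not smaller than |1 - scale_h| = 0.2, so the height
# is fitted and both sides are scaled by scale_h.
scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
new_h = round(scale * input_h / multiple) * multiple  # 384
new_w = round(scale * input_w / multiple) * multiple  # 512
assert (new_h, new_w) == (384, 512)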
class _lowercase ( _A ):
_a : List[Any] = ['pixel_values']
def __init__( self , a = True , a = None , a = PILImageResampling.BILINEAR , a = False , a = 1 , a = True , a = 1 / 2_5_5 , a = True , a = None , a = None , **a , ):
super().__init__(**a )
snake_case__ : Any =size if size is not None else {"""height""": 3_8_4, """width""": 3_8_4}
snake_case__ : List[Any] =get_size_dict(a )
snake_case__ : Tuple =do_resize
snake_case__ : Tuple =size
snake_case__ : Any =keep_aspect_ratio
snake_case__ : List[Any] =ensure_multiple_of
snake_case__ : Tuple =resample
snake_case__ : str =do_rescale
snake_case__ : int =rescale_factor
snake_case__ : Tuple =do_normalize
snake_case__ : int =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case__ : Any =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self , a , a , a = False , a = 1 , a = PILImageResampling.BICUBIC , a = None , **a , ):
snake_case__ : Tuple =get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
snake_case__ : Optional[int] =get_resize_output_image_size(
a , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=a , multiple=a , )
return resize(a , size=a , resample=a , data_format=a , **a )
def lowercase__ ( self , a , a , a = None , **a , ):
return rescale(a , scale=a , data_format=a , **a )
def lowercase__ ( self , a , a , a , a = None , **a , ):
return normalize(a , mean=a , std=a , data_format=a , **a )
def lowercase__ ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ):
snake_case__ : Optional[int] =do_resize if do_resize is not None else self.do_resize
snake_case__ : List[Any] =size if size is not None else self.size
snake_case__ : int =get_size_dict(a )
snake_case__ : Optional[int] =keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
snake_case__ : int =ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
snake_case__ : Optional[Any] =resample if resample is not None else self.resample
snake_case__ : Optional[int] =do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : str =rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ : Optional[Any] =do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Tuple =image_mean if image_mean is not None else self.image_mean
snake_case__ : int =image_std if image_std is not None else self.image_std
snake_case__ : int =make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case__ : int =[to_numpy_array(a ) for image in images]
if do_resize:
snake_case__ : List[str] =[self.resize(image=a , size=a , resample=a ) for image in images]
if do_rescale:
snake_case__ : List[Any] =[self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
snake_case__ : str =[self.normalize(image=a , mean=a , std=a ) for image in images]
snake_case__ : List[Any] =[to_channel_dimension_format(a , a ) for image in images]
snake_case__ : Union[str, Any] ={"""pixel_values""": images}
return BatchFeature(data=a , tensor_type=a )
def lowercase__ ( self , a , a = None ):
snake_case__ : Optional[Any] =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a ) != len(a ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(a ):
snake_case__ : Optional[Any] =target_sizes.numpy()
snake_case__ : Optional[Any] =[]
for idx in range(len(a ) ):
snake_case__ : List[str] =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=a )
snake_case__ : List[Any] =resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a )
else:
snake_case__ : List[str] =logits.argmax(dim=1 )
snake_case__ : Optional[int] =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
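# A hedged usage sketch for an image processor with this interface; the
# class name is garbled in this dump, so the transformers auto class and an
# example DPT checkpoint are used instead.
import numpy as np
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("Intel/dpt-large")  # example checkpoint
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 384, 384])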
| 448 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_lowerCAmelCase : str ="""hf-internal-testing/tiny-random-bert"""
_lowerCAmelCase : int =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
_lowerCAmelCase : Any ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Optional[int] = cached_file(lowerCamelCase__ , lowerCamelCase__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(lowerCamelCase__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) ) )
with open(os.path.join(lowerCamelCase__ , "refs" , "main" ) ) as f:
UpperCAmelCase__: Optional[int] = f.read()
self.assertEqual(lowerCamelCase__ , os.path.join(lowerCamelCase__ , "snapshots" , lowerCamelCase__ , lowerCamelCase__ ) )
self.assertTrue(os.path.isfile(lowerCamelCase__ ) )
# File is cached at the same place the second time.
UpperCAmelCase__: Union[str, Any] = cached_file(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
# Using a specific revision to test the full commit hash.
UpperCAmelCase__: Dict = cached_file(lowerCamelCase__ , lowerCamelCase__ , revision="9b8c223" )
self.assertEqual(lowerCamelCase__ , os.path.join(lowerCamelCase__ , "snapshots" , lowerCamelCase__ , lowerCamelCase__ ) )
def _UpperCAmelCase ( self ):
with self.assertRaisesRegex(lowerCamelCase__ , "is not a valid model identifier" ):
UpperCAmelCase__: Union[str, Any] = cached_file("tiny-random-bert" , lowerCamelCase__ )
with self.assertRaisesRegex(lowerCamelCase__ , "is not a valid git identifier" ):
UpperCAmelCase__: List[Any] = cached_file(lowerCamelCase__ , lowerCamelCase__ , revision="aaaa" )
with self.assertRaisesRegex(lowerCamelCase__ , "does not appear to have a file named" ):
UpperCAmelCase__: List[str] = cached_file(lowerCamelCase__ , "conf" )
def _UpperCAmelCase ( self ):
with self.assertRaisesRegex(lowerCamelCase__ , "does not appear to have a file named" ):
UpperCAmelCase__: int = cached_file(lowerCamelCase__ , "conf" )
with open(os.path.join(lowerCamelCase__ , "refs" , "main" ) ) as f:
UpperCAmelCase__: Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase__ , ".no_exist" , lowerCamelCase__ , "conf" ) ) )
UpperCAmelCase__: List[str] = cached_file(lowerCamelCase__ , "conf" , _raise_exceptions_for_missing_entries=lowerCamelCase__ )
self.assertIsNone(lowerCamelCase__ )
UpperCAmelCase__: List[Any] = cached_file(lowerCamelCase__ , "conf" , local_files_only=lowerCamelCase__ , _raise_exceptions_for_missing_entries=lowerCamelCase__ )
self.assertIsNone(lowerCamelCase__ )
UpperCAmelCase__: Dict = mock.Mock()
UpperCAmelCase__: str = 5_0_0
UpperCAmelCase__: Union[str, Any] = {}
UpperCAmelCase__: int = HTTPError
UpperCAmelCase__: Dict = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=lowerCamelCase__ ) as mock_head:
UpperCAmelCase__: Optional[int] = cached_file(lowerCamelCase__ , "conf" , _raise_exceptions_for_connection_errors=lowerCamelCase__ )
self.assertIsNone(lowerCamelCase__ )
# This check we did call the fake head request
mock_head.assert_called()
def _UpperCAmelCase ( self ):
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCamelCase__ ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCamelCase__ ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCamelCase__ ) )
def _UpperCAmelCase ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(lowerCamelCase__ , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , lowerCamelCase__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(lowerCamelCase__ , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , lowerCamelCase__ , revision="ahaha" )
UpperCAmelCase__: Optional[int] = get_file_from_repo("bert-base-cased" , lowerCamelCase__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
UpperCAmelCase__: Any = json.loads(open(lowerCamelCase__ , "r" ).read() )
self.assertEqual(config["hidden_size"] , 7_6_8 )
def _UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__: List[str] = Path(lowerCamelCase__ ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(lowerCamelCase__ , "a.txt" ) , str(lowerCamelCase__ ) )
self.assertIsNone(get_file_from_repo(lowerCamelCase__ , "b.txt" ) ) | 113 |
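# A short sketch of the helper under test: `cached_file` resolves a filename
# inside a hub repo to a local cache path, downloading on the first call and
# reusing the cached snapshot afterwards.
from transformers.utils import cached_file

path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(path)  # .../models--hf-internal-testing--tiny-random-bert/snapshots/<sha>/config.json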
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_lowerCAmelCase : int ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_lowerCAmelCase : Dict =direct_transformers_import(PATH_TO_TRANSFORMERS)
_lowerCAmelCase : List[str] =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_lowerCAmelCase : int =re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_lowerCAmelCase : Union[str, Any] ={
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: str = None
# source code of `config_class`
UpperCAmelCase__: Optional[int] = inspect.getsource(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: List[Any] = _re_checkpoint.findall(SCREAMING_SNAKE_CASE )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
UpperCAmelCase__: Any = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
        UpperCAmelCase__: int = f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
UpperCAmelCase__: Optional[int] = ckpt_name
break
return checkpoint
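# A quick demonstration of the checkpoint-extracting regex above on a
# made-up docstring fragment; it captures the display name and the hub URL
# of each markdown link.
import re

_re_ckpt = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
print(_re_ckpt.findall(doc))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]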
def _A ( ):
UpperCAmelCase__: List[str] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCAmelCase__: Optional[Any] = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: List[str] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCAmelCase__: Dict = "\n".join(sorted(SCREAMING_SNAKE_CASE ) )
raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 113 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCamelCase : str = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
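# A condensed sketch of the lazy-import pattern above: the module declares
# its import structure up front, and the lazy module defers the heavy
# submodule imports until an attribute is first touched. This is a
# simplified stand-in, not the transformers implementation.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value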
| 516 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
_lowerCamelCase : Optional[int] = TypeVar('''T''')
class lowerCAmelCase__ ( Generic[T] ):
'''simple docstring'''
def __init__( self , lowercase__ , lowercase__ ):
'''simple docstring'''
__A =None
__A =len(lowercase__ )
__A =[any_type for _ in range(self.N )] + arr
__A =fnc
self.build()
def __UpperCamelCase ( self ):
'''simple docstring'''
for p in range(self.N - 1 , 0 , -1 ):
__A =self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __UpperCamelCase ( self , lowercase__ , lowercase__ ):
'''simple docstring'''
p += self.N
__A =v
while p > 1:
__A =p // 2
__A =self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __UpperCamelCase ( self , lowercase__ , lowercase__ ): # noqa: E741
'''simple docstring'''
__A , __A =l + self.N, r + self.N
__A =None
while l <= r:
if l % 2 == 1:
__A =self.st[l] if res is None else self.fn(lowercase__ , self.st[l] )
if r % 2 == 0:
__A =self.st[r] if res is None else self.fn(lowercase__ , self.st[r] )
__A , __A =(l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
_lowerCamelCase : Dict = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
_lowerCamelCase : Union[str, Any] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
_lowerCamelCase : Dict = SegmentTree(test_array, min)
_lowerCamelCase : int = SegmentTree(test_array, max)
_lowerCamelCase : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
def A__ ( ) ->None:
for i in range(len(__A ) ):
for j in range(__A , len(__A ) ):
__A =reduce(__A , test_array[i : j + 1] )
__A =reduce(__A , test_array[i : j + 1] )
__A =reduce(lambda __A , __A : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__A , __A )
assert max_range == max_segment_tree.query(__A , __A )
assert sum_range == sum_segment_tree.query(__A , __A )
test_all_segments()
for index, value in test_updates.items():
_lowerCamelCase : Union[str, Any] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
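# The tree is generic in its combining function, so any associative
# operation works; a small sketch with gcd as the combiner, relying on the
# SegmentTree interface exercised by the self-test above.
from math import gcd

gcd_tree = SegmentTree([12, 18, 24, 30], gcd)
assert gcd_tree.query(0, 3) == 6  # gcd(12, 18, 24, 30)
assert gcd_tree.query(1, 2) == 6  # gcd(18, 24)
gcd_tree.update(2, 25)
assert gcd_tree.query(0, 3) == 1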
| 516 | 1 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __lowercase :
snake_case_ = PegasusConfig
snake_case_ = {}
snake_case_ = """gelu"""
def __init__( self : List[Any] ,A : int ,A : Optional[Any]=13 ,A : Dict=7 ,A : Dict=True ,A : Any=False ,A : Dict=99 ,A : int=32 ,A : Optional[int]=5 ,A : Union[str, Any]=4 ,A : Union[str, Any]=37 ,A : str=0.1 ,A : int=0.1 ,A : Optional[int]=20 ,A : Tuple=2 ,A : str=1 ,A : Optional[Any]=0 ,):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : List[Any] = seq_length
UpperCAmelCase__ : int = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : Dict = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Any = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = eos_token_id
UpperCAmelCase__ : Union[str, Any] = pad_token_id
UpperCAmelCase__ : List[str] = bos_token_id
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ).clip(3 ,self.vocab_size )
UpperCAmelCase__ : List[str] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase__ : Any = np.concatenate([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase__ : str = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase__ : Optional[Any] = prepare_pegasus_inputs_dict(A ,A ,A )
return config, inputs_dict
def __lowercase ( self : Any ,A : Optional[int] ,A : str ,A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = 20
UpperCAmelCase__ : Dict = model_class_name(A )
UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A )
UpperCAmelCase__ : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" )
UpperCAmelCase__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase__ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase__ : int = model.decode(
decoder_input_ids[:, -1:] ,A ,decoder_attention_mask=A ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=A ,)
UpperCAmelCase__ : Dict = model.decode(A ,A )
UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
def __lowercase ( self : Optional[int] ,A : str ,A : Dict ,A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = 20
UpperCAmelCase__ : str = model_class_name(A )
UpperCAmelCase__ : Any = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase__ : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] ,axis=-1 ,)
UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A )
UpperCAmelCase__ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase__ : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase__ : Dict = model.decode(
decoder_input_ids[:, -1:] ,A ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : Union[str, Any] = model.decode(A ,A ,decoder_attention_mask=A )
UpperCAmelCase__ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase__ : Union[str, Any] = np.not_equal(__UpperCamelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
UpperCAmelCase__ : Tuple = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
snake_case_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = FlaxPegasusModelTester(self )
UpperCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=A )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(A ,A ,A )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(A ,A ,A )
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : List[Any] = self._prepare_for_class(A ,A )
UpperCAmelCase__ : int = model_class(A )
@jax.jit
def encode_jitted(A : Optional[int] ,A : Union[str, Any]=None ,**A : Optional[Any] ):
return model.encode(input_ids=A ,attention_mask=A )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase__ : int = encode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase__ : Dict = encode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) ,len(A ) )
for jitted_output, output in zip(A ,A ):
self.assertEqual(jitted_output.shape ,output.shape )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Dict = model_class(A )
UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] )
UpperCAmelCase__ : Dict = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(A : List[Any] ,A : Any ,A : List[Any] ):
return model.decode(
decoder_input_ids=A ,decoder_attention_mask=A ,encoder_outputs=A ,)
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase__ : Tuple = decode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase__ : str = decode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) ,len(A ) )
for jitted_output, output in zip(A ,A ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class_name.from_pretrained("""google/pegasus-large""" ,from_pt=A )
UpperCAmelCase__ : Any = np.ones((1, 1) )
UpperCAmelCase__ : Optional[Any] = model(A )
self.assertIsNotNone(A )
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
UpperCAmelCase__ : Optional[Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
UpperCAmelCase__ : Union[str, Any] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
UpperCAmelCase__ : str = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
UpperCAmelCase__ : str = tokenizer(A ,return_tensors="""np""" ,truncation=A ,max_length=512 ,padding=A )
UpperCAmelCase__ : Union[str, Any] = model.generate(**A ,num_beams=2 ).sequences
UpperCAmelCase__ : int = tokenizer.batch_decode(A ,skip_special_tokens=A )
assert tgt_text == decoded
| 65 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase : Optional[Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
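# A condensed sketch of the optional-dependency guard used above: probe for
# torch once, and register the modeling symbols only when it is importable.
# Simplified stand-in, not the transformers implementation.
import importlib.util


def torch_is_available() -> bool:
    return importlib.util.find_spec("torch") is not None


_import_structure = {"configuration_falcon": ["FalconConfig"]}
if torch_is_available():
    _import_structure["modeling_falcon"] = ["FalconModel", "FalconForCausalLM"]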
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str]=13 , UpperCAmelCase_ : str=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[str]=99 , UpperCAmelCase_ : int=32 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : List[Any]=37 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : Optional[int]=16 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : int=0 , ):
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = seq_length
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : str = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : int = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : str = scope
SCREAMING_SNAKE_CASE : Union[str, Any] = projection_dim
def _A ( self : int ):
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Dict = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
SCREAMING_SNAKE_CASE : Tuple = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A ( self : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = TFDPRContextEncoder(config=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _A ( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : Optional[Any] = TFDPRQuestionEncoder(config=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = model(UpperCAmelCase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _A ( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE : Dict = TFDPRReader(config=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_ids""": input_ids}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( A_ , A_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Optional[int] = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
UpperCamelCase_ : int = False
UpperCamelCase_ : Dict = False
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : str = False
def _A ( self : int ):
SCREAMING_SNAKE_CASE : List[Any] = TFDPRModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def _A ( self : Any ):
self.config_tester.run_common_tests()
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase_ )
def _A ( self : str ):
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase_ )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase_ )
@slow
def _A ( self : Tuple ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[str] = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Dict = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[Any] = TFDPRReader.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
SCREAMING_SNAKE_CASE : List[Any] = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = '''markuplm'''
def __init__( self : int , UpperCAmelCase_ : List[str]=3_0522 , UpperCAmelCase_ : Optional[Any]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : Union[str, Any]=3072 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Dict=1E-12 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : str=256 , UpperCAmelCase_ : str=1024 , UpperCAmelCase_ : List[str]=216 , UpperCAmelCase_ : List[Any]=1001 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : Dict=50 , UpperCAmelCase_ : Optional[int]="absolute" , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : Any , ):
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : List[str] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout
# additional properties
SCREAMING_SNAKE_CASE : Any = max_depth
SCREAMING_SNAKE_CASE : int = max_xpath_tag_unit_embeddings
SCREAMING_SNAKE_CASE : List[str] = max_xpath_subs_unit_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = tag_pad_id
SCREAMING_SNAKE_CASE : Optional[Any] = subs_pad_id
SCREAMING_SNAKE_CASE : List[Any] = xpath_unit_hidden_size
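# A short sketch: instantiating the config above with its defaults and
# reading back the markup-specific fields (values taken from the default
# arguments in the signature above).
from transformers import MarkupLMConfig

config = MarkupLMConfig()
print(config.max_depth)  # 50
print(config.max_xpath_tag_unit_embeddings)  # 256
print(config.xpath_unit_hidden_size)  # 32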
| 488 | 0 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowerCAmelCase__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ = ort.RunOptions()
lowerCAmelCase__ = 128
lowerCAmelCase__ = 1
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = 2000
lowerCAmelCase__ = {}
for iter in range(max_iters):
lowerCAmelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
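# Provider selection above falls back silently when TensorRT or CUDA is not
# present; a sanity check like this makes the fallback visible.
import onnxruntime as ort

available = ort.get_available_providers()
requested = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
missing = [p for p in requested if p not in available]
if missing:
    print(f"Falling back without: {missing}")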
| 83 |
"""simple docstring"""
from collections import namedtuple
lowerCAmelCase__ = namedtuple('''from_to''', '''from_ to''')
lowerCAmelCase__ = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_0_1, 1000),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7),
'''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def snake_case_ ( A_ : float, A_ : str, A_ : str ):
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ''', '''.join(A_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ''', '''.join(A_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
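# Example calls for the converter above, assuming it is exposed as
# `volume_conversion` (its name is garbled in this dump). Each result is
# value * from_factor * to_factor per the table.
print(volume_conversion(4, "cubicmeter", "litre"))  # 4000.0
print(volume_conversion(1, "litre", "gallon"))  # 0.264172
print(volume_conversion(3, "gallon", "cubicyard"))  # ~0.0178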
| 83 | 1 |
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Union[str, Any] = 0
__lowerCamelCase : Optional[Any] = number
while duplicate > 0:
__lowerCamelCase , __lowerCamelCase : Dict = divmod(SCREAMING_SNAKE_CASE__ , 10 )
fact_sum += factorial(SCREAMING_SNAKE_CASE__ )
return fact_sum == number
if __name__ == "__main__":
print('Program to check whether a number is a Krisnamurthy Number or not.')
lowercase_ = int(input('Enter number: ').strip())
print(
F"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
)
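# Worked check of the property under test: a Krishnamurthy (factorion)
# number equals the sum of the factorials of its digits.
from math import factorial

assert sum(factorial(int(d)) for d in "145") == 145  # 1! + 4! + 5! = 1 + 24 + 120
assert sum(factorial(int(d)) for d in "40585") == 40585
assert sum(factorial(int(d)) for d in "23") != 23  # 2! + 3! = 8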
| 230 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = RoFormerTokenizer
__snake_case = RoFormerTokenizerFast
__snake_case = True
__snake_case = True
def _snake_case ( self: Dict ):
super().setUp()
def _snake_case ( self: Optional[int] , **a: Tuple ):
return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a )
def _snake_case ( self: Tuple , **a: int ):
return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a )
def _snake_case ( self: Tuple ):
__lowerCamelCase : Tuple = '永和服装饰品有限公司,今天天气非常好'
__lowerCamelCase : str = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
return input_text, output_text
def _snake_case ( self: str ):
__lowerCamelCase : Optional[int] = self.get_tokenizer()
__lowerCamelCase , __lowerCamelCase : str = self.get_chinese_input_output_texts()
__lowerCamelCase : Any = tokenizer.tokenize(a )
self.assertListEqual(a , output_text.split() )
__lowerCamelCase : Optional[int] = tokens + [tokenizer.unk_token]
__lowerCamelCase : str = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def _snake_case ( self: Dict ):
__lowerCamelCase : List[str] = self.get_rust_tokenizer()
__lowerCamelCase , __lowerCamelCase : List[Any] = self.get_chinese_input_output_texts()
__lowerCamelCase : Optional[Any] = tokenizer.tokenize(a )
self.assertListEqual(a , output_text.split() )
__lowerCamelCase : int = tokens + [tokenizer.unk_token]
__lowerCamelCase : Any = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def _snake_case ( self: int ):
pass
def _snake_case ( self: Optional[Any] ):
pass
def _snake_case ( self: str ):
pass
| 230 | 1 |
'''simple docstring'''
import json
import sys
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : List[str],_SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE,encoding='utf-8' ) as f:
__A= json.load(_SCREAMING_SNAKE_CASE )
__A= ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ):
__A= results[benchmark_name]
__A= benchmark_name.split('/' )[-1]
output_md.append(f"""### Benchmark: {benchmark_file_name}""" )
__A= '| metric |'
__A= '|--------|'
__A= '| new / old (diff) |'
for metric_name in sorted(_SCREAMING_SNAKE_CASE ):
__A= benchmark_res[metric_name]
__A= metric_vals['new']
__A= metric_vals.get('old',_SCREAMING_SNAKE_CASE )
__A= metric_vals.get('diff',_SCREAMING_SNAKE_CASE )
__A= f""" {new_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE,(int, float) ) else 'None'
if old_val is not None:
val_str += f""" / {old_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE,(int, float) ) else "None"
if dif_val is not None:
val_str += f""" ({dif_val:f})""" if isinstance(_SCREAMING_SNAKE_CASE,(int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(_SCREAMING_SNAKE_CASE,'w',encoding='utf-8' ) as f:
f.writelines('\n'.join(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
UpperCAmelCase__ = sys.argv[1]
UpperCAmelCase__ = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
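# A tiny hedged example of the input shape the formatter above expects and
# the table row it produces; the benchmark path and metric names are made up.
results = {
    "benchmarks/text_generation": {
        "tokens_per_s": {"new": 101.5, "old": 97.2, "diff": 4.3},
        "peak_mem_mb": {"new": 812.0},
    }
}
# For this input, format_json_to_md renders (inside the <details> block):
#
# ### Benchmark: text_generation
# | metric | peak_mem_mb | tokens_per_s |
# |--------|---|---|
# | new / old (diff) | 812.000000 | 101.500000 / 97.200000 (4.300000) |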
| 186 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
UpperCAmelCase__ = 1_0_0
UpperCAmelCase__ = set(range(3, NUM_PRIMES, 2))
primes.add(2)
UpperCAmelCase__ = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
__A= set()
__A= 42
__A= 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : int = 5000 ):
"""simple docstring"""
for number_to_partition in range(1,_SCREAMING_SNAKE_CASE ):
if len(partition(_SCREAMING_SNAKE_CASE ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 186 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self :Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase ( self :Optional[int] ):
A = 1
A = 3
A = (32, 32)
A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCamelCase )
return image
@property
def lowerCamelCase ( self :int ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def lowerCamelCase ( self :List[str] ):
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowerCamelCase ( self :Tuple ):
torch.manual_seed(0 )
A = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(__UpperCamelCase )
@property
def lowerCamelCase ( self :str ):
def extract(*__UpperCamelCase :List[Any] , **__UpperCamelCase :Optional[Any] ):
class _UpperCAmelCase :
def __init__( self :Optional[int] ):
A = torch.ones([0] )
def lowerCamelCase ( self :int , __UpperCamelCase :Union[str, Any] ):
self.pixel_values.to(__UpperCamelCase )
return self
return Out()
return extract
def lowerCamelCase ( self :Optional[int] ):
A = "cpu" # ensure determinism for the device-dependent torch.Generator
A = self.dummy_cond_unet
A = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
A = self.dummy_vae
A = self.dummy_text_encoder
A = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
A = 77
A = self.dummy_image.to(__UpperCamelCase )
A = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A = AltDiffusionImgaImgPipeline(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=self.dummy_extractor , )
A = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__UpperCamelCase )
A = alt_pipe.to(__UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A painting of a squirrel eating a burger"
A = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
A = alt_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__UpperCamelCase , )
A = output.images
A = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
A = alt_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
A = image[0, -3:, -3:, -1]
A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCamelCase ( self :Dict ):
A = self.dummy_cond_unet
A = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
A = self.dummy_vae
A = self.dummy_text_encoder
A = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
A = 77
A = self.dummy_image.to(__UpperCamelCase )
# put models in fp16
A = unet.half()
A = vae.half()
A = bert.half()
# make sure here that pndm scheduler skips prk
A = AltDiffusionImgaImgPipeline(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=self.dummy_extractor , )
A = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__UpperCamelCase )
A = alt_pipe.to(__UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A painting of a squirrel eating a burger"
A = torch.manual_seed(0 )
A = alt_pipe(
[prompt] , generator=__UpperCamelCase , num_inference_steps=2 , output_type="np" , image=__UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCamelCase ( self :Tuple ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
A = init_image.resize((7_60, 5_04) )
A = "BAAI/AltDiffusion"
A = AltDiffusionImgaImgPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
A = "A fantasy landscape, trending on artstation"
A = torch.manual_seed(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=__UpperCamelCase , output_type="np" , )
A = output.images[0]
A = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
def lowerCamelCase ( self :Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self :List[str] ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
A = "BAAI/AltDiffusion"
A = AltDiffusionImgaImgPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
A = "A fantasy landscape, trending on artstation"
A = torch.manual_seed(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=__UpperCamelCase , output_type="np" , )
A = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
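# Usage sketch outside the test harness ("BAAI/AltDiffusion" is the real
# checkpoint used above; running this downloads weights and wants a GPU):
# pipe = AltDiffusionImgaImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
# result = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75).images[0]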
| 524 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = 'T5Config'


class TFMTaModel(TFTaModel):
    model_type = '''mt5'''
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = '''mt5'''
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = '''mt5'''
    config_class = MTaConfig
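# Usage sketch: the classes above are thin wrappers that reuse the TF T5
# implementation with the MT5 config; "google/mt5-small" is a real checkpoint
# (loading it downloads weights).
# model = TFMTaForConditionalGeneration.from_pretrained("google/mt5-small")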
| 524 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
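# Usage sketch: with the lazy module installed above, `from transformers import
# NezhaModel` defers the torch-dependent import until the attribute is first
# accessed.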
| 542 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
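if __name__ == "__main__":
    # Usage sketch with a tiny hypothetical corpus: "cat" appears in 2 of 3
    # documents, so idf = round(log10(3 / 2), 3) = 0.176.
    corpus = "the cat sat\nthe dog ran\nthe cat chased the dog"
    tf = term_frequency("cat", "the cat sat on the cat mat")  # -> 2
    df, n = document_frequency("cat", corpus)  # -> (2, 3)
    print(tf_idf(tf, inverse_document_frequency(df, n)))  # -> 0.352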
| 542 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class a__ ( unittest.TestCase ):
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : str = tempfile.mkdtemp()
# fmt: off
__UpperCAmelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__UpperCAmelCase : List[Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_))))
__UpperCAmelCase : Optional[int] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__UpperCAmelCase : int = {"unk_token": "<unk>"}
__UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
__UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(UpperCamelCase_) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(UpperCamelCase_))
__UpperCAmelCase : int = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
__UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCamelCase_)
with open(self.image_processor_file , "w" , encoding="utf-8") as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : List[Any] , **UpperCamelCase_ : Optional[int]):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCamelCase_)
def a_ ( self : Union[str, Any] , **UpperCamelCase_ : List[str]):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCamelCase_)
def a_ ( self : List[str] , **UpperCamelCase_ : Tuple):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_)
def a_ ( self : Optional[Any]):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
__UpperCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : int = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : Dict = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
processor_slow.save_pretrained(self.tmpdirname)
__UpperCAmelCase : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_)
__UpperCAmelCase : int = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
processor_fast.save_pretrained(self.tmpdirname)
__UpperCAmelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_)
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__UpperCAmelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)")
__UpperCAmelCase : Optional[Any] = self.get_image_processor(do_normalize=UpperCamelCase_)
__UpperCAmelCase : List[str] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase_)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCamelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCamelCase_)
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : int = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Tuple = self.prepare_image_inputs()
__UpperCAmelCase : Tuple = image_processor(UpperCamelCase_ , return_tensors="np")
__UpperCAmelCase : List[Any] = processor(images=UpperCamelCase_ , return_tensors="np")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.get_image_processor()
__UpperCAmelCase : int = self.get_tokenizer()
__UpperCAmelCase : str = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : List[str] = "lower newer"
__UpperCAmelCase : Any = processor(text=UpperCamelCase_ , return_tensors="np")
__UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase_ , return_tensors="np")
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist())
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Any = self.get_image_processor()
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : Dict = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : int = "lower newer"
__UpperCAmelCase : Optional[int] = self.prepare_image_inputs()
__UpperCAmelCase : Union[str, Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_)
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = "google/owlvit-base-patch32"
__UpperCAmelCase : Any = OwlViTProcessor.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Tuple = ["cat", "nasa badge"]
__UpperCAmelCase : List[str] = processor(text=UpperCamelCase_)
__UpperCAmelCase : List[Any] = 16
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape , (2, seq_length))
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = "google/owlvit-base-patch32"
__UpperCAmelCase : str = OwlViTProcessor.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Dict = [["cat", "nasa badge"], ["person"]]
__UpperCAmelCase : List[Any] = processor(text=UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = 16
__UpperCAmelCase : Optional[int] = len(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = max([len(UpperCamelCase_) for texts in input_texts])
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length))
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = "google/owlvit-base-patch32"
__UpperCAmelCase : Dict = OwlViTProcessor.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Any = ["cat", "nasa badge"]
__UpperCAmelCase : List[Any] = processor(text=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = 16
__UpperCAmelCase : Optional[int] = inputs["input_ids"]
__UpperCAmelCase : Optional[Any] = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape , (2, seq_length))
self.assertListEqual(list(input_ids[0]) , predicted_ids[0])
self.assertListEqual(list(input_ids[1]) , predicted_ids[1])
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.get_image_processor()
__UpperCAmelCase : Optional[int] = self.get_tokenizer()
__UpperCAmelCase : Any = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Tuple = self.prepare_image_inputs()
__UpperCAmelCase : Optional[int] = self.prepare_image_inputs()
__UpperCAmelCase : Tuple = processor(images=UpperCamelCase_ , query_images=UpperCamelCase_)
self.assertListEqual(list(inputs.keys()) , ["query_pixel_values", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.get_image_processor()
__UpperCAmelCase : Optional[int] = self.get_tokenizer()
__UpperCAmelCase : Any = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : Optional[int] = processor.batch_decode(UpperCamelCase_)
__UpperCAmelCase : int = tokenizer.batch_decode(UpperCamelCase_)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_)
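# Usage sketch outside the test harness ("google/owlvit-base-patch32" is a real
# checkpoint, so running this downloads weights; `image` is any PIL image):
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")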
| 487 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
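# A minimal sketch of how the mixin is consumed (class names and dict contents
# are hypothetical; concrete test files set these two attributes):
# class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#     feature_extraction_class = MyFeatureExtractor
#     feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000}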
| 487 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowercase__ : Any = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def snake_case__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
_UpperCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 98 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=32 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=[10, 20, 30, 40] , __UpperCAmelCase=[2, 2, 3, 2] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=["stage2", "stage3", "stage4"] , __UpperCAmelCase=3 , __UpperCAmelCase=None , ) -> Optional[Any]:
A : Tuple = parent
A : Any = batch_size
A : List[Any] = image_size
A : Tuple = num_channels
A : Any = num_stages
A : Any = hidden_sizes
A : List[str] = depths
A : str = is_training
A : Any = use_labels
A : Any = intermediate_size
A : List[str] = hidden_act
A : List[Any] = type_sequence_label_size
A : Optional[int] = initializer_range
A : Tuple = out_features
A : Tuple = num_labels
A : Tuple = scope
A : int = num_stages
def snake_case ( self ) -> Optional[Any]:
A : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Any = None
if self.use_labels:
A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Any = self.get_config()
return config, pixel_values, labels
def snake_case ( self ) -> Optional[int]:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def snake_case ( self ) -> Tuple:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__UpperCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=__UpperCAmelCase , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
A : List[Any] = UperNetForSemanticSegmentation(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A : int = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def snake_case ( self ) -> Tuple:
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCAmelCase_ : str = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
UpperCAmelCase_ : Union[str, Any] = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
UpperCAmelCase_ : int = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : int = False
UpperCAmelCase_ : Dict = False
def snake_case ( self ) -> Union[str, Any]:
A : int = UperNetModelTester(self )
A : Any = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def snake_case ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ) -> Optional[int]:
return
def snake_case ( self ) -> Any:
A , A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : int = model_class(__UpperCAmelCase )
A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def snake_case ( self ) -> List[str]:
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def snake_case ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def snake_case ( self ) -> int:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case ( self ) -> int:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case ( self ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def snake_case ( self ) -> List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self ) -> Optional[Any]:
pass
def snake_case ( self ) -> Union[str, Any]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
A : Any = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
A : List[str] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
A : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : List[str] = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : List[str] = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Any = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ) -> Tuple:
A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = _config_zero_init(__UpperCAmelCase )
A : List[str] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
A : Any = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def snake_case ( self ) -> Tuple:
pass
@slow
def snake_case ( self ) -> str:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Tuple = UperNetForSemanticSegmentation.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def snake_case__ ( ):
A : Tuple = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
A : Optional[Any] = Image.open(lowerCamelCase_ ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
def snake_case ( self ) -> Dict:
A : List[Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
A : List[str] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(__UpperCAmelCase )
A : List[str] = prepare_img()
A : str = processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase )
with torch.no_grad():
A : str = model(**__UpperCAmelCase )
A : Optional[int] = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
A : List[Any] = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
def snake_case ( self ) -> int:
A : List[str] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
A : Any = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(__UpperCAmelCase )
A : str = prepare_img()
A : List[Any] = processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase )
with torch.no_grad():
A : List[Any] = model(**__UpperCAmelCase )
A : List[str] = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
A : Tuple = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
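# Usage sketch (mirrors the integration tests above; "openmmlab/upernet-convnext-tiny"
# is a real checkpoint, so running this downloads weights):
# processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
# model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
# logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits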
| 542 | 0 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __A( unittest.TestCase ):
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : int ):
lowerCamelCase_ = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] ):
lowerCamelCase_ = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , __UpperCamelCase )
import datasets
lowerCamelCase_ = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
lowerCamelCase_ = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , __UpperCamelCase , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = """Intel/dpt-large"""
lowerCamelCase_ = pipeline("""depth-estimation""" , model=__UpperCamelCase )
lowerCamelCase_ = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
lowerCamelCase_ = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : Dict ):
# This is highly irregular to have no small tests.
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
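# Usage sketch (mirrors the slow test above; "Intel/dpt-large" is a real
# checkpoint, so running this downloads weights):
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# outputs["depth"] is a PIL image, outputs["predicted_depth"] the raw torch.Tensor.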
| 103 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
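# A minimal sketch of a concrete command (the command name and behavior are
# hypothetical; real commands follow this register_subcommand/run shape):
# class HelloCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         sub = parser.add_parser("hello")
#         sub.set_defaults(func=lambda args: HelloCommand())
#
#     def run(self):
#         print("hello from the CLI")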
| 103 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
"""simple docstring"""
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
if self.framework == "tf":
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
SCREAMING_SNAKE_CASE_ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase__ )
else:
raise ValueError('Unsupported framework' )
return masked_index
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.get_masked_index(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
"""simple docstring"""
if return_tensors is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.framework
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.ensure_exactly_one_mask_token(lowerCAmelCase__ )
return model_inputs
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = model_inputs['input_ids']
return model_outputs
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__=5 , lowerCAmelCase__=None ):
"""simple docstring"""
if target_ids is not None and target_ids.shape[0] < top_k:
SCREAMING_SNAKE_CASE_ : Dict = target_ids.shape[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = model_outputs['input_ids'][0]
SCREAMING_SNAKE_CASE_ : Dict = model_outputs['logits']
if self.framework == "tf":
SCREAMING_SNAKE_CASE_ : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.numpy()
SCREAMING_SNAKE_CASE_ : str = outputs[0, masked_index, :]
SCREAMING_SNAKE_CASE_ : Optional[Any] = stable_softmax(lowerCAmelCase__ , axis=-1 )
if target_ids is not None:
SCREAMING_SNAKE_CASE_ : Any = tf.gather_nd(tf.squeeze(lowerCAmelCase__ , 0 ) , target_ids.reshape(-1 , 1 ) )
SCREAMING_SNAKE_CASE_ : str = tf.expand_dims(lowerCAmelCase__ , 0 )
SCREAMING_SNAKE_CASE_ : List[Any] = tf.math.top_k(lowerCAmelCase__ , k=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = topk.values.numpy(), topk.indices.numpy()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase__ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
SCREAMING_SNAKE_CASE_ : int = outputs[0, masked_index, :]
SCREAMING_SNAKE_CASE_ : Any = logits.softmax(dim=-1 )
if target_ids is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = probs[..., target_ids]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = probs.topk(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : int = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
SCREAMING_SNAKE_CASE_ : Dict = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
SCREAMING_SNAKE_CASE_ : str = input_ids.numpy().copy()
if target_ids is not None:
SCREAMING_SNAKE_CASE_ : Tuple = target_ids[p].tolist()
SCREAMING_SNAKE_CASE_ : Tuple = p
# Filter padding out:
SCREAMING_SNAKE_CASE_ : int = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(lowerCAmelCase__ )
result.append(lowerCAmelCase__ )
if single_mask:
return result[0]
return result
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ):
"""simple docstring"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = [targets]
try:
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer.get_vocab()
except Exception:
SCREAMING_SNAKE_CASE_ : str = {}
SCREAMING_SNAKE_CASE_ : List[str] = []
for target in targets:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab.get(lowerCAmelCase__ , lowerCAmelCase__ )
if id_ is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer(
lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , max_length=1 , truncation=lowerCAmelCase__ , )['input_ids']
if len(lowerCAmelCase__ ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
'We cannot replace it with anything meaningful, ignoring it' )
continue
SCREAMING_SNAKE_CASE_ : List[str] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
SCREAMING_SNAKE_CASE_ : int = list(set(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) == 0:
raise ValueError('At least one target must be provided when passed.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array(lowerCAmelCase__ )
return target_ids
def UpperCamelCase__ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = {}
if targets is not None:
SCREAMING_SNAKE_CASE_ : str = self.get_target_ids(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = target_ids
if top_k is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
def __call__( self , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) == 1:
return outputs[0]
return outputs
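# Usage sketch: end users reach this class through the pipeline factory;
# "distilroberta-base" is a real masked-LM checkpoint (downloads weights).
# from transformers import pipeline
# unmasker = pipeline("fill-mask", model="distilroberta-base")
# unmasker("Paris is the <mask> of France.", top_k=3)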
| 101 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(F'''Converting model {model_name}...''')
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1E-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1E-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1E-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(F'''nielsr/{model_name}''')
        processor.push_to_hub(F'''nielsr/{model_name}''')
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
__lowerCamelCase : Dict = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
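# Example invocation (a sketch; the script filename and output path are illustrative):
#
#   python convert_detr_checkpoint.py \
#       --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50-converted
#
# Adding --push_to_hub also uploads the converted model and image processor.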
| 297 | 0 |
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Approximate the solution of dy/dx = ode_func(x, y) with the forward Euler
    method, starting from y(x0) = y0 and stepping with `step_size` until x_end."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
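# Quick sanity check (a sketch): solve dy/dx = y with y(0) = 1 on [0, 1].
# With step_size = 0.01, forward Euler gives y(1) ~ 2.7048, approaching e as the
# step shrinks (the method's global error is O(step_size)).
#
#   ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(ys[-1])  # ~2.7048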
| 469 | import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Gaussian Error Linear Unit, the exact erf-based formulation."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother tanh approximation of GELU (the GPT-2 variant)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff_1 = tf.cast(0.044715, x.dtype)
    coeff_2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_2 * (1.0 + coeff_1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with the output clipped to [-10, 10], useful for quantization."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two along `axis`, gate one half with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
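# Usage sketch for the string-to-activation lookup above (requires TensorFlow):
#
#   act = get_tf_activation("gelu_fast")
#   y = act(tf.constant([-1.0, 0.0, 1.0]))
#
# Unknown names raise a KeyError listing the supported ACT2FN keys.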
| 469 | 1 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            """Stub so the module imports when PIL is unavailable."""
            pass
def hashimage(image: Image) -> str:
    """Hash an image's raw bytes so test outputs can be compared compactly."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a ( unittest.TestCase ):
lowercase_ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def UpperCAmelCase__ ( self : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
__lowerCAmelCase = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
__lowerCAmelCase = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , lowerCamelCase_ )
import datasets
__lowerCAmelCase = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
__lowerCAmelCase = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , lowerCamelCase_ , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
pass
@slow
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__lowerCAmelCase = '''Intel/dpt-large'''
__lowerCAmelCase = pipeline("depth-estimation" , model=lowerCamelCase_ )
__lowerCAmelCase = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
__lowerCAmelCase = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.6_6_2 )
@require_torch
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
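# Minimal end-to-end usage mirroring the slow test above (a sketch):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   out["depth"].save("depth.png")  # PIL image; out["predicted_depth"] is the raw tensor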
| 611 | '''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    '''Compare array[index1] and array[index2] and swap them if they violate
    `direction` (1 = ascending, 0 = descending).'''
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    '''Recursively merge a bitonic sequence of `length` elements starting at `low`.'''
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    '''Sort `length` elements of `array` starting at `low`: build an ascending
    half and a descending half, then merge the resulting bitonic sequence.'''
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
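# Note: the classic bitonic network assumes len(array) is a power of two;
# direction 1 sorts ascending and 0 descending, as exercised below.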
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 614 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
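# Lazy-import behaviour (a sketch): `from transformers.models.swin import SwinModel`
# resolves through the _LazyModule above, so modeling_swin (and its torch
# dependency) is only imported on first attribute access.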
| 714 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( _lowerCamelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ =CanineTokenizer
UpperCamelCase__ =False
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
super().setUp()
__magic_name__ : Optional[int] = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def UpperCAmelCase__ ( self : Dict , **lowerCamelCase_ : Optional[int] ) -> CanineTokenizer:
__magic_name__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
__magic_name__ : List[str] = 1024
return tokenizer
@require_torch
def UpperCAmelCase__ ( self : int ) -> int:
__magic_name__ : List[Any] = self.canine_tokenizer
__magic_name__ : Any = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
__magic_name__ : Optional[int] = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
# fmt: on
__magic_name__ : List[Any] = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
__magic_name__ : Tuple = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__ : Any = self.canine_tokenizer
        __magic_name__ : Dict = ['''Once there was a man.''', '''He wrote a test in HuggingFace Transformers.''']
__magic_name__ : Dict = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' , lowerCamelCase_ )
self.assertIn('''attention_mask''' , lowerCamelCase_ )
self.assertIn('''token_type_ids''' , lowerCamelCase_ )
@require_torch
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
__magic_name__ : int = self.canine_tokenizer
__magic_name__ : Dict = [
            '''What\'s the weather?''',
'''It\'s about 25 degrees.''',
]
__magic_name__ : List[Any] = tokenizer(
text_target=lowerCamelCase_ , max_length=32 , padding='''max_length''' , truncation=lowerCamelCase_ , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
# safety check on max_len default value so we are sure the test works
__magic_name__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__magic_name__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : str = tempfile.mkdtemp()
__magic_name__ : Any = ''' He is very happy, UNwant\u00E9d,running'''
__magic_name__ : str = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
__magic_name__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
__magic_name__ : Tuple = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
__magic_name__ : Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Any = tempfile.mkdtemp()
__magic_name__ : Any = ''' He is very happy, UNwant\u00E9d,running'''
__magic_name__ : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__magic_name__ : str = chr(0XE007 )
additional_special_tokens.append(lowerCamelCase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__magic_name__ : Tuple = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
__magic_name__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
__magic_name__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertIn(lowerCamelCase_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase_ )
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
__magic_name__ : Any = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ , __magic_name__ : str = self.get_clean_sequence(lowerCamelCase_ )
# a special token for Canine can be defined as follows:
__magic_name__ : Optional[Any] = 0XE005
__magic_name__ : Optional[Any] = chr(lowerCamelCase_ )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__magic_name__ : Optional[int] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) , 1 )
__magic_name__ : Union[str, Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCamelCase_ )
__magic_name__ : str = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
__magic_name__ : Optional[Any] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
__magic_name__ : Union[str, Any] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , input_encoded + special_token_id )
__magic_name__ : Any = tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__ : str = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ : Tuple = chr(0XE005 )
__magic_name__ : Optional[int] = chr(0XE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCamelCase_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
__magic_name__ : List[str] = tokenizer.tokenize(lowerCamelCase_ )
__magic_name__ : Optional[int] = tokenizer.tokenize(lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) , 1 )
self.assertEqual(len(lowerCamelCase_ ) , 1 )
self.assertEqual(token_a[0] , lowerCamelCase_ )
self.assertEqual(token_a[0] , lowerCamelCase_ )
@require_tokenizers
def UpperCAmelCase__ ( self : str ) -> List[str]:
__magic_name__ : List[Any] = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__magic_name__ : str = 0XE006
__magic_name__ : Optional[int] = chr(lowerCamelCase_ )
__magic_name__ : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCamelCase_ )
tokenizer.from_pretrained(lowerCamelCase_ )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
__magic_name__ : List[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
__magic_name__ : int = json.load(lowerCamelCase_ )
# a special token for Canine can be defined as follows:
__magic_name__ : List[str] = 0XE006
__magic_name__ : List[str] = chr(lowerCamelCase_ )
__magic_name__ : int = [new_token_a]
__magic_name__ : str = [new_token_a]
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ : int = tokenizer_class.from_pretrained(lowerCamelCase_ , extra_ids=0 )
self.assertIn(lowerCamelCase_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__magic_name__ : Optional[int] = 0XE007
__magic_name__ : List[str] = chr(lowerCamelCase_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ : List[str] = [AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ )]
__magic_name__ : str = tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , extra_ids=0 )
self.assertIn(lowerCamelCase_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCAmelCase__ ( self : Any ) -> List[str]:
__magic_name__ : Optional[int] = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ : Union[str, Any] = '''hello world'''
if self.space_between_special_tokens:
__magic_name__ : List[Any] = '''[CLS] hello world [SEP]'''
else:
__magic_name__ : List[str] = input
__magic_name__ : Dict = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
__magic_name__ : Union[str, Any] = tokenizer.decode(lowerCamelCase_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCamelCase_ , [output, output.lower()] )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ : str = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
__magic_name__ : Any = '''a'''
__magic_name__ : List[str] = ord(lowerCamelCase_ )
for attr in attributes_list:
setattr(lowerCamelCase_ , attr + '''_id''' , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , attr + '''_id''' ) , lowerCamelCase_ )
setattr(lowerCamelCase_ , attr + '''_id''' , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , attr + '''_id''' ) , lowerCamelCase_ )
setattr(lowerCamelCase_ , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens_ids''' ) , [] )
__magic_name__ : Any = 0XE006
__magic_name__ : str = chr(lowerCamelCase_ )
setattr(lowerCamelCase_ , '''additional_special_tokens_ids''' , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens''' ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
pass
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
pass
def UpperCAmelCase__ ( self : Tuple ) -> Any:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
pass
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
pass
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
pass
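# CANINE tokenizes straight to Unicode code points (a sketch, consistent with the
# expected ids in the batch test above, where [CLS] = 57344 and [SEP] = 57345):
#
#   tok = CanineTokenizer.from_pretrained("google/canine-s")
#   tok("hi")["input_ids"]  # -> [57344, ord("h"), ord("i"), 57345]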
| 501 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : List[str] = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE__ : Dict = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCAmelCase_ ( __lowercase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ['''input_ids''', '''attention_mask''']
__lowerCamelCase = RobertaTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="replace" , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=False , _lowerCAmelCase=True , **_lowerCAmelCase , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase , **_lowerCAmelCase , )
UpperCAmelCase__ : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
UpperCAmelCase__ : Optional[Any] = add_prefix_space
UpperCAmelCase__ : List[str] = pre_tok_class(**_lowerCAmelCase )
UpperCAmelCase__ : int = add_prefix_space
UpperCAmelCase__ : Dict = """post_processor"""
UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase )
if tokenizer_component_instance:
UpperCAmelCase__ : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase__ : Optional[Any] = tuple(state["""sep"""] )
if "cls" in state:
UpperCAmelCase__ : str = tuple(state["""cls"""] )
UpperCAmelCase__ : Optional[int] = False
if state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = add_prefix_space
UpperCAmelCase__ : List[str] = True
if state.get("""trim_offsets""" , _lowerCAmelCase ) != trim_offsets:
UpperCAmelCase__ : List[str] = trim_offsets
UpperCAmelCase__ : Any = True
if changes_to_apply:
UpperCAmelCase__ : Tuple = getattr(_lowerCAmelCase , state.pop("""type""" ) )
UpperCAmelCase__ : str = component_class(**_lowerCAmelCase )
setattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase )
@property
def __UpperCAmelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self , _lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else value
UpperCAmelCase__ : List[Any] = value
def __UpperCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
UpperCAmelCase__ : Any = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def __UpperCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
UpperCAmelCase__ : List[str] = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
UpperCAmelCase__ : int = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
UpperCAmelCase__ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
UpperCAmelCase__ : List[str] = [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
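# Typical round trip with the fast tokenizer above (a sketch; upstream the class
# is RobertaTokenizerFast):
#
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#   enc = tok("Hello world")      # adds <s> ... </s> via the post-processor
#   tok.decode(enc["input_ids"])  # -> '<s>Hello world</s>'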
| 79 |
'''simple docstring'''
from __future__ import annotations
__UpperCamelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """Grid path search ordered by f = g + heuristic (A* with the given heuristic)."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("""Algorithm is unable to find solution""")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
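# The cell list above always pops the entry with the smallest f = g + heuristic,
# i.e. A* search; with an all-zero heuristic it degenerates to uniform-cost
# (Dijkstra) search on the grid.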
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 26 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Create a 2nd-order low-pass biquad.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Create a 2nd-order high-pass biquad.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Create a 2nd-order band-pass biquad.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Create a 2nd-order all-pass biquad (flat magnitude, phase shift only).'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Create a peaking-EQ biquad with `gain_db` of boost or cut at `frequency`.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Create a low-shelf biquad with `gain_db` of boost or cut below `frequency`.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''Create a high-shelf biquad with `gain_db` of boost or cut above `frequency`.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
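# The constructors above implement the biquads from R. Bristow-Johnson's
# "Audio EQ Cookbook". A usage sketch (IIRFilter.process is assumed to filter
# one sample at a time, as in audio_filters.iir_filter):
#
#   hp = make_highpass(1_000, 48_000)  # 1 kHz high-pass at a 48 kHz sample rate
#   filtered = [hp.process(sample) for sample in samples]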
| 411 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowerCamelCase__ = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _SCREAMING_SNAKE_CASE( snake_case_ : int ) ->str:
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _SCREAMING_SNAKE_CASE( snake_case_ : List[Any] , snake_case_ : Dict ) ->List[Any]:
'''simple docstring'''
if args.student_type == "roberta":
_lowercase : List[str] = False
elif args.student_type == "gpt2":
_lowercase : List[Any] = False
def _SCREAMING_SNAKE_CASE( snake_case_ : int , snake_case_ : List[Any] ) ->Any:
'''simple docstring'''
if args.student_type == "roberta":
_lowercase : Optional[int] = False
def _SCREAMING_SNAKE_CASE( ) ->Any:
'''simple docstring'''
_lowercase : Union[str, Any] = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.1_5 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.0_5 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.0_2 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=40_00 , help='''Checkpoint interval.''' )
_lowercase : List[str] = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    F"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    ''' overwrite it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(F"Param: {args}" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_lowercase : Optional[Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_lowercase : Union[str, Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_lowercase : Optional[Any] = tokenizer.all_special_tokens.index(snake_case_ )
_lowercase : Any = tokenizer.all_special_ids[idx]
logger.info(F"Special tokens {special_tok_ids}" )
_lowercase : Union[str, Any] = special_tok_ids
_lowercase : Union[str, Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"Loading data from {args.data_file}" )
with open(args.data_file , '''rb''' ) as fp:
_lowercase : List[Any] = pickle.load(snake_case_ )
if args.mlm:
logger.info(F"Loading token counts from {args.token_counts} (already pre-computed)" )
with open(args.token_counts , '''rb''' ) as fp:
_lowercase : Any = pickle.load(snake_case_ )
_lowercase : List[str] = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_lowercase : Any = 0.0 # do not predict special tokens
_lowercase : Dict = torch.from_numpy(snake_case_ )
else:
_lowercase : str = None
_lowercase : str = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"Loading student config from {args.student_config}" )
_lowercase : str = student_config_class.from_pretrained(args.student_config )
_lowercase : List[str] = True
if args.student_pretrained_weights is not None:
logger.info(F"Loading pretrained weights from {args.student_pretrained_weights}" )
_lowercase : int = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
_lowercase : Optional[int] = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F"cuda:{args.local_rank}" )
logger.info('''Student loaded.''' )
# TEACHER #
_lowercase : str = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F"cuda:{args.local_rank}" )
logger.info(F"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_lowercase : int = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
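# Example invocation for MLM distillation (a sketch; all paths are illustrative):
#
#   python train.py --force --dump_path serialization_dir/my_distilbert \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0
#
# This flag combination satisfies sanity_checks(): --mlm requires --token_counts,
# a distilbert student pairs with a bert teacher, and alpha_mlm > 0 forces alpha_clm == 0.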
| 411 | 1 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, """vision""")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["""candidate_labels"""] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["""text_inputs"""] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("""candidate_labels""")
        text_inputs = model_inputs.pop("""text_inputs""")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("""candidate_labels""")
        logits = model_outputs["""logits"""][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')

        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
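# Example call (a sketch; the checkpoint is one public CLIP model):
#
#   from transformers import pipeline
#   clf = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   clf("cat.png", candidate_labels=["cat", "dog"])
#   # -> [{"score": ..., "label": ...}, ...] sorted by descending score,
#   #    using the default hypothesis_template "This is a photo of {}."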
| 341 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCamelCase__ :
def __init__( self ,A ,A=13 ,A=7 ,A=True ,A=True ,A=True ,A=99 ,A=32 ,A=5 ,A=4 ,A=37 ,A="gelu" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=3 ,A=4 ,A=None ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids ,token_type_ids=token_type_ids ,head_mask=head_mask )
        result = model(input_ids ,token_type_ids=token_type_ids )
        result = model(input_ids )

        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids ,token_type_ids=token_type_ids ,labels=input_ids )
        self.parent.assertEqual(result.loss.shape ,() )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids ,token_type_ids=token_type_ids ,labels=input_ids )
        self.parent.assertEqual(result.loss.shape ,() )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()

        sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        result = model(input_ids ,token_type_ids=token_type_ids ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            '''feature-extraction''': OpenAIGPTModel,
            '''text-classification''': OpenAIGPTForSequenceClassification,
            '''text-generation''': OpenAIGPTLMHeadModel,
            '''zero-shot''': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip( self ,pipeline_test_casse_name ,config_class ,model_architecture ,tokenizer_name ,processor_name ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device ,)
                inputs_dict["""input_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""token_type_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""mc_token_ids"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=torch_device ,)
                inputs_dict["""mc_labels"""] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=OpenAIGPTConfig ,n_embd=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )

    def test_openai_gpt_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )

    def test_openai_gpt_double_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )

    def test_openai_gpt_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OpenAIGPTModelLanguageGenerationTest ( unittest.TestCase ):
    @slow
    def test_lm_generate_openai_gpt( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4_735, 544]] ,dtype=torch.long ,device=torch_device )  # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids ,do_sample=False )
        self.assertListEqual(output_ids[0].tolist() ,expected_output_ids )
| 341 | 1 |
def _SCREAMING_SNAKE_CASE ( input_str : str ) -> bool:
    """simple docstring"""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 ,ch_unicode )

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
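# Examples (illustrative): each code point maps to one bit in `bitmap`, so a
# repeated character is detected in O(n) time with O(1) extra bookkeeping.
#   _SCREAMING_SNAKE_CASE("abcde")  # True  - all characters unique
#   _SCREAMING_SNAKE_CASE("hello")  # False - 'l' repeats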
if __name__ == "__main__":
import doctest
doctest.testmod()
| 199 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 199 | 1 |
from __future__ import annotations
class Node :
    def __init__( self , data=None ):
        self.data = data
        self.next = None

    def __repr__( self ):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(F'''{temp.data}''' )
            temp = temp.next
        return "->".join(string_rep )


def make_linked_list( elements_list ):
    '''simple docstring'''
    if not elements_list:
        raise Exception('The Elements List is empty' )

    current = head = Node(elements_list[0] )
    for i in range(1 , len(elements_list ) ):
        current.next = Node(elements_list[i] )
        current = current.next
    return head


def print_reverse( head_node ):
    '''simple docstring'''
    if head_node is not None and isinstance(head_node , Node ):
        print_reverse(head_node.next )
        print(head_node.data )


def main():
    '''simple docstring'''
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43] )
    print('Linked List:' )
    print(linked_list )
    print('Elements in Reverse:' )
    print_reverse(linked_list )


if __name__ == "__main__":
    main()
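# Expected output of main() for the list [14, 52, 14, 12, 43]:
#   Linked List:
#   14->52->14->12->43
#   Elements in Reverse:
#   43
#   12
#   14
#   52
#   14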
| 362 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class ResNetConfig ( BackboneConfigMixin , PretrainedConfig ):
    model_type = '''resnet'''
    layer_types = ['''basic''', '''bottleneck''']

    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1e-3
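# Usage sketch (illustrative):
#   config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
#   config.hidden_sizes  # -> [256, 512, 1024, 2048] (the defaults above)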
| 362 | 1 |
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result( ) -> None:
    '''simple docstring'''
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
    result = kruskal(num_nodes , edges )

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(result ) == sorted(expected )
| 251 |
"""simple docstring"""
def lowercase__ ( num : int ) -> bool:
    '''simple docstring'''
    if num < 0:
        return False

    num_copy = num
    rev_num = 0

    while num > 0:
        rev_num = rev_num * 1_0 + (num % 1_0)
        num //= 1_0

    return num_copy == rev_num
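# Examples (illustrative):
#   lowercase__(121)   # True  - reads the same reversed
#   lowercase__(-121)  # False - negative numbers are rejected
#   lowercase__(10)    # False - reverses to 1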
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 251 | 1 |
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCamelCase (PipelineTool ):
_lowercase = "naver-clova-ix/donut-base-finetuned-docvqa"
_lowercase = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
_lowercase = "document_qa"
_lowercase = AutoProcessor
_lowercase = VisionEncoderDecoderModel
_lowercase = ["image", "text"]
_lowercase = ["text"]
    def __init__( self: Union[str, Any],*args: Any,**kwargs: Any ):
        '''simple docstring'''
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )

        super().__init__(*args,**kwargs )
    def encode( self: Dict,document: "Image",question: str ):
        '''simple docstring'''
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}',question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt,add_special_tokens=False,return_tensors='pt' ).input_ids
        pixel_values = self.pre_processor(document,return_tensors='pt' ).pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self: Tuple,inputs: Tuple ):
        '''simple docstring'''
        return self.model.generate(
            inputs['pixel_values'].to(self.device ),decoder_input_ids=inputs['decoder_input_ids'].to(self.device ),max_length=self.model.decoder.config.max_position_embeddings,early_stopping=True,pad_token_id=self.pre_processor.tokenizer.pad_token_id,eos_token_id=self.pre_processor.tokenizer.eos_token_id,use_cache=True,num_beams=1,bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],return_dict_in_generate=True,).sequences
    def decode( self: Dict,outputs: Tuple ):
        '''simple docstring'''
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token,'' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token,'' )
        sequence = re.sub(r'<.*?>','',sequence,count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )

        return sequence["answer"]
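# Usage sketch (illustrative; the image path is hypothetical and the Donut
# checkpoint named in the class is downloaded on first use):
#   tool = __lowerCamelCase()
#   answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")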
| 1 |
"""simple docstring"""
def greatest_common_divisor ( a , b ):
    """simple docstring"""
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )


def gcd_by_iterative ( x , y ):
    """simple docstring"""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
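# Both implementations agree, e.g.:
#   greatest_common_divisor(24, 40) == 8 and gcd_by_iterative(24, 40) == 8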
def main ( ):
    """simple docstring"""
    try:
        nums = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
        num_a = int(nums[0] )
        num_b = int(nums[1] )
        print(
            f"""greatest_common_divisor({num_a}, {num_b}) = """
            f"""{greatest_common_divisor(num_a , num_b )}""" )
        print(f"""By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a , num_b )}""" )
    except (IndexError, UnboundLocalError, ValueError):
        print("""Wrong input""" )
if __name__ == "__main__":
main()
| 644 | 0 |
import heapq
def greedy_min_vertex_cover ( graph : dict ) -> set[int]:
    """simple docstring"""
    queue = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
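    # Expected output for this sample graph: Minimum vertex cover: {0, 1, 2, 4}
    # (node 2, tied for highest degree, is taken first by the greedy strategy).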
| 379 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 379 | 1 |
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
SCREAMING_SNAKE_CASE__ = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
SCREAMING_SNAKE_CASE__ = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self : List[str] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'X': datasets.Sequence(datasets.Value('float' , id='sequence' ) , id='X' ),
} ) , )
    def _compute( self : Tuple , X : Union[str, Any] , reference_distribution : Any ):
        """simple docstring"""
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )

        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError('Expected `X` to be a 2D vector' )
        if len(reference_distribution.shape ) != 2:
            raise ValueError('Expected `reference_distribution` to be a 2D vector' )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , inv_covmat )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()

        return {"mahalanobis": mahal_dist}
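# Example from the metric docstring above:
#   mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
#   # -> {"mahalanobis": array([0.5])}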
| 9 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput ( BaseOutput ):
    down_block_res_samples : jnp.ndarray
    mid_block_res_sample : jnp.ndarray
class FlaxControlNetConditioningEmbedding ( nn.Module ):
    conditioning_embedding_channels : int
    block_out_channels : Tuple[int] = (16, 32, 96, 256)
    dtype : jnp.dtype = jnp.float32
    def setup( self : str ) ->Any:
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv1 )
            conv2 = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv2 )
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self : Any , conditioning : Dict ) ->jnp.ndarray:
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )

        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )

        embedding = self.conv_out(embedding )
        return embedding
@flax_register_to_config
class FlaxControlNetModel ( nn.Module ,FlaxModelMixin ,ConfigMixin ):
    sample_size : int = 32
    in_channels : int = 4
    down_block_types : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention : Union[bool, Tuple[bool]] = False
    block_out_channels : Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block : int = 2
    attention_head_dim : Union[int, Tuple[int]] = 8
    num_attention_heads : Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim : int = 1280
    dropout : float = 0.0
    use_linear_projection : bool = False
    dtype : jnp.dtype = jnp.float32
    flip_sin_to_cos : bool = True
    freq_shift : int = 0
    controlnet_conditioning_channel_order : str = "rgb"
    conditioning_embedding_out_channels : Tuple[int] = (16, 32, 96, 256)
    def init_weights( self : int , rng : jax.random.KeyArray ) ->FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )

        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}

        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
    def setup( self : List[Any] ) ->Dict:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )

        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )

        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )

            down_blocks.append(down_block )

            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self : Any , sample : Dict , timesteps : Tuple , encoder_hidden_states : List[str] , controlnet_cond : int , conditioning_scale : float = 1.0 , return_dict : bool = True , train : bool = False , ) ->Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )

        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )

        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )

        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )

        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample )

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
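# Usage sketch (illustrative; mirrors the Flax module API used above):
#   import jax
#   controlnet = FlaxControlNetModel()
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
#   down_res, mid_res = controlnet.apply(
#       {"params": params}, sample, timesteps, encoder_hidden_states,
#       controlnet_cond, return_dict=False)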
| 315 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 714 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig ( folder_based_builder.FolderBasedBuilderConfig ):
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder ( folder_based_builder.FolderBasedBuilder ):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = """audio"""
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="""audio""" ,label_column="""label""" )
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 699 | 0 |
'''simple docstring'''
def solution( limit : int = 1000 )-> int:
    """simple docstring"""
    return sum(e for e in range(3 , limit ) if e % 3 == 0 or e % 5 == 0 )
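# For the default limit of 1000 this returns 233168 (Project Euler problem 1).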
if __name__ == "__main__":
print(F"""{solution() = }""")
| 138 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester :
    def __init__( self , parent , batch_size=3 , image_size=3_2 , num_channels=3 , embeddings_size=1_0 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )

        config = self.get_config()

        return config, pixel_values, labels
    def get_config( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        return
    @unittest.skip(reason="""ResNet does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason="""ResNet does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
        pass
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest ( unittest.TestCase):
    @cached_property
    def default_image_processor( self ):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head( self ):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )

        # forward pass
        outputs = model(**inputs )

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 681 | 0 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared ( ciphertext : str , cipher_alphabet : list[str] | None = None , frequencies_dict : dict[str, float] | None = None , case_sensitive : bool = False , ) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'''a''': 0.0_8497,
'''b''': 0.0_1492,
'''c''': 0.0_2202,
'''d''': 0.0_4253,
'''e''': 0.1_1162,
'''f''': 0.0_2228,
'''g''': 0.0_2015,
'''h''': 0.0_6094,
'''i''': 0.0_7546,
'''j''': 0.0_0153,
'''k''': 0.0_1292,
'''l''': 0.0_4025,
'''m''': 0.0_2406,
'''n''': 0.0_6749,
'''o''': 0.0_7507,
'''p''': 0.0_1929,
'''q''': 0.0_0095,
'''r''': 0.0_7587,
'''s''': 0.0_6327,
'''t''': 0.0_9356,
'''u''': 0.0_2758,
'''v''': 0.0_0978,
'''w''': 0.0_2560,
'''x''': 0.0_0150,
'''y''': 0.0_1994,
'''z''': 0.0_0077,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
if not case_sensitive:
        ciphertext = ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
                    expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
                    expected = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
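# Usage sketch (illustrative): for typical English text the lowest chi-squared
# score recovers the original shift, e.g.
#   shift, score, decoded = decrypt_caesar_with_chi_squared("ifmmp xpsme")
#   # decoded should come out as "hello world" (a shift of 1)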
| 713 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
    def setUp( self : Any ):
        '''simple docstring'''
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=True , )

        assert hasattr(self , '''env''' )
    def create_estimator( self : str , instance_count : Dict ):
        '''simple docstring'''
        job_name = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings
        distribution = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='''py36''' , )
    def save_results_as_csv( self : Optional[Any] , job_name : List[str] ):
        '''simple docstring'''
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
    @parameterized.expand([(2,)] )
    def test_script( self : Tuple , instance_count : Optional[int] ):
        '''simple docstring'''
        estimator = self.create_estimator(instance_count )

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )

        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
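# Note: per the skipif marker above, this suite only runs when the environment
# variable TEST_SAGEMAKER is set to "True" (e.g. when releasing a minor version).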
| 34 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase_ ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , crop_pct = 0.9 , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , rescale_factor = 1 / 255 , do_rescale = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image , size , crop_pct = None , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size['shortest_edge'] / crop_pct )
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size['height'] / crop_pct )
                else:
                    scale_size = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
            else:
                raise ValueError('Invalid size for resize: {}'.format(size ) )

            output_size = get_resize_output_image_size(image , size=scale_size , default_to_square=False )
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
            elif "height" in size and "width" in size:
                output_size = (size['height'], size['width'])
            else:
                raise ValueError('Invalid size for resize: {}'.format(size ) )

        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , crop_pct = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )

        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )

        if do_center_crop and crop_pct is None:
            raise ValueError('Crop_pct must be specified if do_center_crop is True.' )

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]

        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]

        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]

        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
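# Usage sketch (illustrative; BaseImageProcessor routes __call__ to preprocess):
#   processor = lowerCAmelCase_(size={"shortest_edge": 224}, crop_pct=0.9)
#   batch = processor(images=image, return_tensors="np")
#   batch["pixel_values"][0].shape  # -> (3, 224, 224)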
| 383 |
from __future__ import annotations
from collections import Counter
from random import random
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> List[str]:
__UpperCamelCase = {}
def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> None:
__UpperCamelCase = {}
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
if nodea not in self.connections:
self.add_node(_SCREAMING_SNAKE_CASE )
if nodea not in self.connections:
self.add_node(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = probability
def __lowercase( self ) -> list[str]:
return list(self.connections )
def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> str:
__UpperCamelCase = 0
__UpperCamelCase = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def _a ( __lowercase , __lowercase , __lowercase ) -> dict[str, int]:
"""simple docstring"""
__UpperCamelCase = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(__lowercase , __lowercase , __lowercase )
__UpperCamelCase = Counter(graph.get_nodes() )
__UpperCamelCase = start
for _ in range(__lowercase ):
__UpperCamelCase = graph.transition(__lowercase )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
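    # Hedged usage sketch (not taken from the source): drive a tiny two-state
    # chain; the outgoing probabilities of each node are assumed to sum to 1.
    example_transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", example_transitions, 5_000))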
| 383 | 1 |
'''simple docstring'''
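# Project Euler problem 6: difference between the square of the sum and the sum
# of the squares of the first n natural numbers, computed with the closed forms
# sum(1..n) = n(n + 1)/2 and sum(k^2, 1..n) = n(n + 1)(2n + 1)/6.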
def solution(n: int = 1_00) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6

    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 711 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root can only be bracketed if the signs at a and b differ
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
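    # Both calls bracket the positive root of 10 - x**2, so each prints ~= 3.16
    # (sqrt(10)), accurate only to the 0.01 interval-width tolerance used above.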
| 694 | 0 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 389 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
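# Hedged usage sketch (not taken from the source): spreading 12 layers over 3
# devices gives {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}.
# device_map = get_device_map(12, [0, 1, 2])
# assert_device_map(device_map, 12)  # passes: no duplicate, missing, or extra blocks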
| 388 | 0 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
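    # Hedged usage sketch (not taken from the source): with capacity 5, picking
    # the items of weight 1 and 4 (values 5 and 8) is optimal.
    # knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)  -> 13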
| 707 |
"""simple docstring"""
from math import factorial
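# Project Euler problem 20: sum of the digits in the number n! (default 100!).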
def solution(num: int = 100) -> int:
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 112 | 0 |
'''simple docstring'''
from math import log2


def lowest_set_bit_position(number: int) -> int:
    """Returns the zero-based position of the lowest set bit of `number`."""
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if (number == 0) else int(log2(number & -number))
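# Example: 12 is 0b1100, so 12 & -12 isolates the lowest set bit (0b100 = 4)
# and int(log2(4)) = 2, the zero-based position of that bit.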
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids) | 149 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_lowercase = "hf-internal-testing/tiny-random-bert"
_lowercase = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
_lowercase = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 715 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT") | 44 | 0 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """
    Zeller's congruence: returns the day of the week for a date given as a
    string in mm-dd-yyyy or mm/dd/yyyy format.
    """
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
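# Hedged usage sketch (not taken from the source):
# zeller("01-31-2010")  -> "Your date 01-31-2010, is a Sunday!"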
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : List[str] = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
_lowerCAmelCase : List[str] = parser.parse_args()
    print(zeller(args.date_input))
| 242 |
from jiwer import compute_measures
import datasets
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
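# Worked check of the docstring example (one possible alignment): the first pair
# has 1 substitution against 4 reference words; the second has 2 substitutions
# and 1 insertion against 4 reference words, so WER = (1 + 3) / (4 + 4) = 0.5.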
| 242 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 575 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)

            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
| 345 |
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
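# Each recursion level splits a triangle into 3 sub-triangles, so a drawing of
# depth d traces 3**d smallest triangles.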
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
__lowerCamelCase : str = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
__lowerCamelCase : List[Any] = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 416 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neo'] = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_gpt_neo'] = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 75 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 75 | 1 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
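# Hedged usage sketch (not taken from the source); script name and paths are placeholders:
# python <this_script>.py \
#     --tf_checkpoint_path /path/to/mobilebert_variables.ckpt \
#     --mobilebert_config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin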
if __name__ == "__main__":
snake_case__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 23 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 348 | 0 |
'''simple docstring'''
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
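# Hedged usage sketch (not taken from the source): pass the directories to
# filter on as command-line arguments, e.g.
#   python get_modified_files.py utils src tests examples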
| 711 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 339 | 0 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    '''simple docstring'''
    x = re.sub('''<n>''', '''''', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 188 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 10_24,
'gpt2-medium': 10_24,
'gpt2-large': 10_24,
'gpt2-xl': 10_24,
'distilgpt2': 10_24,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids | 217 | 0 |
"""simple docstring"""
def molarity_to_normality(nfactor: float, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law: P = nRT / V, with R = 0.0821 L-atm/(mol-K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law: V = nRT / P, with R = 0.0821 L-atm/(mol-K)."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law: T = PV / (nR), with R = 0.0821 L-atm/(mol-K)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
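# Hedged usage sketch for the ideal-gas helpers above (argument values are
# illustrative, not from the original file): 3 mol in 0.82 L at 300 K gives
# 3 * 0.0821 * 300 / 0.82 ~= 90 atm.
# >>> moles_to_pressure(volume=0.82, moles=3, temperature=300)
# 90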
| 720 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # copy one trax weight (and optional bias) into the matching torch layer
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1)
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1)
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias))
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias))
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias)
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias)
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings))
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias))
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias)
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
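    # Hedged invocation sketch (all paths are placeholders, not from the original script):
    # python convert_reformer_trax_checkpoint_to_pytorch.py \
    #     --trax_model_pkl_path ./reformer.pkl \
    #     --config_file ./config.json \
    #     --pytorch_dump_path ./pytorch_model.bin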
| 121 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                f""" version {__version__} is >= {version_name}"""
            )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
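# Hedged usage sketch (`old_arg`/`new_arg` are illustrative names, not from this file):
# def my_function(new_arg=None, **kwargs):
#     old_arg = deprecate("old_arg", "1.0.0", "Use `new_arg` instead.", take_from=kwargs)
#     new_arg = new_arg if new_arg is not None else old_arg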
| 625 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    # Class name restored on the assumption this mirrors transformers' MusicGen processor,
    # which pairs an Encodec feature extractor with a T5 tokenizer.
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
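# Hedged usage sketch (the checkpoint id is an assumption, not from this file;
# `from_pretrained` comes from the ProcessorMixin base class):
# processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
# inputs = processor(text=["lo-fi beat"], padding=True, return_tensors="pt")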
| 625 | 1 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"""Saving model to {output_model_file}""")
                torch.save(state_dict, output_model_file)
                logger.info(f"""Model saved to {output_model_file}""")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
                if model_index == 0
                else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"""Saving model to {output_model_file}""")
            torch.save(state_dict, output_model_file)
            logger.info(f"""Model saved to {output_model_file}""")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"""{MODEL_NAME}_{model_index}""")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"""Saving model to {ckpt_dir}""")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner()
            )
            logger.info(f"""Model saved to {ckpt_dir}""")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"""Loading model from {input_model_file}""")
            state_dict = torch.load(input_model_file)
            logger.info(f"""Model loaded from {input_model_file}""")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
                if model_index == 0
                else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"""Loading model from {input_model_file}""")
            state_dict = torch.load(input_model_file)
            logger.info(f"""Model loaded from {input_model_file}""")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"""{MODEL_NAME}_{model_index}""")
                if f"""{MODEL_NAME}""" not in input_dir
                else input_dir
            )
            logger.info(f"""Loading model from {ckpt_dir}""")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner()
            )
            state_dict = state_dict["model"]
            logger.info(f"""Model loaded from {ckpt_dir}""")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"""Saving Optimizer state to {output_optimizer_file}""")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"""Optimizer state saved in {output_optimizer_file}""")
        else:
            ckpt_dir = os.path.join(output_dir, f"""{OPTIMIZER_NAME}_{optimizer_index}""")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"""Saving Optimizer state to {ckpt_dir}""")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state}, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner()
            )
            logger.info(f"""Optimizer state saved in {ckpt_dir}""")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"""Loading Optimizer state from {input_optimizer_file}""")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"""Optimizer state loaded from {input_optimizer_file}""")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"""{OPTIMIZER_NAME}_{optimizer_index}""")
                if f"""{OPTIMIZER_NAME}""" not in input_dir
                else input_dir
            )
            logger.info(f"""Loading Optimizer from {ckpt_dir}""")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(), optimizer_key="optimizer", storage_reader=dist_cp.FileSystemReader(ckpt_dir)
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"""Optimizer loaded from {ckpt_dir}""")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
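# Hedged usage sketch inside an Accelerate FSDP run (object names are illustrative):
# save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt", model_index=0)
# save_fsdp_optimizer(accelerator.state.fsdp_plugin, accelerator, optimizer, model, "ckpt")
# load_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt")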
| 715 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
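# Minimal sketch of what the `_LazyModule` above enables (illustrative): an import like
# `from transformers.models.resnet import ResNetModel` only triggers the heavy torch
# import on first attribute access, keeping `import transformers` fast.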
| 595 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value
        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'''Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'''
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value
        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
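# Hedged usage sketch (the example data is illustrative):
# feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
# feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
# -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}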
| 42 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
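    # Hedged invocation sketch (the script name and paths are placeholders):
    # accelerate launch this_script.py --output_dir ./ckpts --num_epochs 2
    # accelerate launch this_script.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0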
| 42 | 1 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None,
                 do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
                 num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo",
                 class_info_file="ade20k_panoptic.json", num_text=10):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    # (attribute name below is an assumption based on the shared image-processing test mixin)
    feature_extraction_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
@property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]
        inputs = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), annotations, return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True,
        )
        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)
        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # first row
        fake_binary_mask[1, :15] = 1  # second row
        fake_binary_mask[5, :10] = 1  # fifth row
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77,
            class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77,
            class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77,
            class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 715 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCAmelCase = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.", FutureWarning,
        )
        super().__init__(*args, **kwargs)
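# Hedged migration sketch (the checkpoint id is illustrative):
# old = YolosFeatureExtractor.from_pretrained("hustvl/yolos-small")  # emits the deprecation warning
# new = YolosImageProcessor.from_pretrained("hustvl/yolos-small")    # preferred replacement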
| 565 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
        'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FocalNetForImageClassification',
        'FocalNetForMaskedImageModeling',
        'FocalNetBackbone',
        'FocalNetModel',
        'FocalNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 31 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
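# Worked example (illustrative): in the sorted row [4, 3, 2, -1] the first negative
# value sits at index 3, so find_negative_index([4, 3, 2, -1]) returns 3.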
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit
    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 31 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__(metaclass=DummyObject):
    # dummy placeholder object raising if torch/scipy are missing;
    # the original class name is not recoverable from this excerpt
    _backends = ["torch", "scipy"]
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
requires_backends(self , ["torch", "scipy"] )
@classmethod
def lowercase__ ( cls , *snake_case__ , **snake_case__ ):
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def lowercase__ ( cls , *snake_case__ , **snake_case__ ):
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"] )
| 700 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
                 hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
                 hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFResNetModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase : Any = self.default_image_processor
lowerCAmelCase : Optional[Any] = prepare_img()
lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : str = model(**snake_case__ )
# verify the logits
lowerCAmelCase : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : str = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1e-4 ) )
| 681 | 0 |
"""simple docstring"""
def solution(power: int = 1_000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
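# Hedged sanity check: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so
# solution(15) should return 26.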
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 237 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 5_12,
"""xlm-roberta-large""": 5_12,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_12,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_12,
"""xlm-roberta-large-finetuned-conll03-english""": 5_12,
"""xlm-roberta-large-finetuned-conll03-german""": 5_12,
}
class snake_case_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
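    # Illustrative note: a single sequence is thus formatted as `<s> X </s>` and
    # a pair of sequences as `<s> A </s></s> B </s>`, the standard XLM-R layout.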
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        return len(self.sp_model ) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,) | 237 | 1 |
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
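# Hedged note: for the sample graph below, the articulation points printed are
# expected to be 2, 3 and 5 (removing any one of them disconnects the graph).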
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
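    # Note: with this lazy-module pattern, the heavy torch/TF/Flax imports above
    # are only executed when the matching attribute (e.g. `EncoderDecoderModel`)
    # is first accessed, which keeps the top-level package import fast.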
| 329 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs( self ):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config( self ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model( self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ):
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
        self.parent.assertEqual(result.state_preds.shape , states.shape )
        self.parent.assertEqual(result.action_preds.shape , actions.shape )
        self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            'states': states,
            'actions': actions,
            'rewards': rewards,
            'returns_to_go': returns_to_go,
            'timesteps': timesteps,
            'attention_mask': attention_mask,
        }
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp( self ):
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                'states',
                'actions',
                'rewards',
                'returns_to_go',
                'timesteps',
                'attention_mask',
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@slow
    def test_autoregressive_prediction( self ):
        """simple docstring"""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 )  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            action = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
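            # (Summary) each iteration mirrors one environment step: append the
            # predicted action, fake an env transition, subtract the observed
            # reward from the return-to-go, and extend the timestep index.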
| 13 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase_ : Any = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCamelCase_ : List[str] = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
    model_type = "retribert"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
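    # Usage sketch (illustrative): instances behave like any PretrainedConfig,
    # e.g. constructing with `projection_dim=256` and serializing via
    # `.to_json_string()` or `.save_pretrained(...)`.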
| 115 | 0 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
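# Hedged check against Project Euler 86's stated counts: M = 99 gives 1975
# distinct cuboids and M = 100 gives 2060, so solution(1975) should return 100.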
if __name__ == "__main__":
print(F"""{solution() = }""")
| 714 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'distilbert-base-uncased': 5_1_2,
'distilbert-base-uncased-distilled-squad': 5_1_2,
'distilbert-base-cased': 5_1_2,
'distilbert-base-cased-distilled-squad': 5_1_2,
'distilbert-base-german-cased': 5_1_2,
'distilbert-base-multilingual-cased': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class __a ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""" , do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None) ->Union[str, Any]:
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
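    # Illustrative note: this yields the BERT-style layout `[CLS] A [SEP]` for a
    # single sequence and `[CLS] A [SEP] B [SEP]` for a pair.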
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) ->List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None) ->Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
| 572 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result , args ):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
    # load metric
    wer = load_metric("wer" )
    cer = load_metric("cer" )
    # compute metrics
    wer_result = wer.compute(references=result["target"] , predictions=result["prediction"] )
    cer_result = cer.compute(references=result["target"] , predictions=result["prediction"] )
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str )
    with open(f"{dataset_id}_eval_results.txt" , "w" ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file , "w" ) as p, open(target_file , "w" ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f"{i}" + "\n" )
                p.write(batch["prediction"] + "\n" )
                t.write(f"{i}" + "\n" )
                t.write(batch["target"] + "\n" )
            result.map(write_to_file , with_indices=True )
def normalize_text(text ):
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , "" , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t ) )
    return text
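# Illustrative example: normalize_text("Hello, World!") returns "hello world".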
def main(args ):
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio" , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
| 671 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Dict = logging.get_logger(__name__)
def squared_euclidean_distance(a , b ):
    b = b.T
    a2 = np.sum(np.square(a ) , axis=1 )
    b2 = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x , clusters ):
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
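# Illustrative: for pixels reshaped to (H*W, 3) and a palette `clusters` of
# shape (k, 3), color_quantize maps every pixel to the index of its nearest
# palette colour (ImageGPT uses a fixed palette of k = 512 colours).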
class __lowercase ( BaseImageProcessor ):
"""simple docstring"""
    model_input_names = ['''pixel_values''']
    def __init__( self , clusters: Optional[Union[List[List[int]], np.ndarray]] = None , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_normalize: bool = True , do_color_quantize: bool = True , **kwargs , ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs)
    def normalize( self , image: np.ndarray , data_format: Optional[Union[str, ChannelDimension]] = None , ):
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format)
        image = image - 1
        return image
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_normalize: bool = None , do_color_quantize: Optional[bool] = None , clusters: Optional[Union[List[List[int]], np.ndarray]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True.")
# All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images , clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data , tensor_type=return_tensors)
| 671 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n ):
    n = str(n )
    return n == n[::-1]
def solution(limit = 1000000 ):
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
            total += i
return total
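# Hedged check: 585 = 0b1001001001 is palindromic in base 10 and base 2, so it
# is one of the numbers summed by solution().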
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 712 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=DummyObject ):
    _backends = ['''torch''', '''torchsde''']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch", "torchsde"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "torchsde"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "torchsde"] )
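# Note: this "dummy object" pattern keeps the class name importable even when
# torch/torchsde are not installed; any actual use raises a clear error via
# `requires_backends` instead of failing at package import time.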
| 465 | 0 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
models = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
ZERO2 = """zero2"""
ZERO3 = """zero3"""
stages = [ZERO2, ZERO3]
def parameterized_custom_name_func(func , param_num , param ) -> Dict:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
    return F"""{func.__name__}_{param_based_name}"""
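# Illustrative: for param.args == ("zero2", "base"), the generated sub-test name
# would be something like `test_fp16_distributed_zero2_base`.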
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __a ( TestCasePlus ):
    """simple docstring"""
    @parameterized.expand(params , name_func=parameterized_custom_name_func )
    def test_fp32_non_distributed( self , stage , model ):
        """simple docstring"""
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=parameterized_custom_name_func )
    def test_fp32_distributed( self , stage , model ):
        """simple docstring"""
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=False , )
    @parameterized.expand(params , name_func=parameterized_custom_name_func )
    def test_fp16_non_distributed( self , stage , model ):
        """simple docstring"""
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=parameterized_custom_name_func )
    def test_fp16_distributed( self , stage , model ):
        """simple docstring"""
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=True , )
    def do_checks( self , output_dir ):
        """simple docstring"""
        pass
    def run_and_check( self , stage , model , eval_steps = 10 , distributed = True , fp16 = True , quality_checks = True , ):
        """simple docstring"""
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fp16=fp16 , )
        self.do_checks(output_dir )
        return output_dir
    def run_trainer( self , stage , model_name , eval_steps = 10 , num_train_epochs = 1 , distributed = True , fp16 = True , ):
        """simple docstring"""
        output_dir = self.get_auto_remove_tmp_dir("./xxx" , after=False )
        args = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
        if fp16:
            args.extend(["--fp16"] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
        script = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
        launcher = self.get_launcher(distributed )
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env() )
        return output_dir
    def get_launcher( self , distributed=False ):
        """simple docstring"""
        num_gpus = min(2 , get_gpu_count() ) if distributed else 1
        return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 453 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ) -> Optional[Any]:
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
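# Illustrative: floats_list((2, 3)) returns a 2x3 nested list of floats in [0, scale).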
class TvltFeatureExtractionTester(unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2_000 , spectrogram_length=2_048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=44_100 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
            speech_inputs = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __a ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
"""simple docstring"""
    feature_extraction_class = TvltFeatureExtractor
    def setUp( self ):
        """simple docstring"""
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self ):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , "spectrogram_length" ) )
        self.assertTrue(hasattr(feature_extractor , "feature_size" ) )
        self.assertTrue(hasattr(feature_extractor , "num_audio_channels" ) )
        self.assertTrue(hasattr(feature_extractor , "hop_length" ) )
        self.assertTrue(hasattr(feature_extractor , "chunk_length" ) )
        self.assertTrue(hasattr(feature_extractor , "sampling_rate" ) )
    def test_feat_extract_from_and_save_pretrained( self ):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters" )
        mel_second = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters" )
        mel_second = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=44_100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        encoded_audios = feature_extractor(speech_inputs , return_tensors="np" , sampling_rate=44_100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            speech_inputs , return_tensors="np" , sampling_rate=44_100 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="np" , sampling_rate=44_100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self , num_samples ):
        """simple docstring"""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        """simple docstring"""
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors="pt" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
| 453 | 1 |
"""simple docstring"""
import numpy as np
class Cell:
    def __init__( self ):
        '''simple docstring'''
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__( self , cell ):
        '''simple docstring'''
        return self.position == cell.position
    def showcell( self ):
        '''simple docstring'''
        print(self.position )
class Gridworld:
    def __init__( self , world_size=(5, 5) ):
        '''simple docstring'''
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show( self ):
        '''simple docstring'''
        print(self.w )
    def get_neigbours( self , cell ):
        '''simple docstring'''
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours
def astar(world , start , goal ):
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1 , y1 = n.position
            x2 , y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
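# Note: the demo below marks every cell on the returned path with 1 in
# `world.w` before printing, so the printed grid visualizes the route.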
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F'path from {start.position} to {goal.position}')
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 109 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( TokenizerTesterMixin ,unittest.TestCase ):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
pass
| 109 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def fetch_jobs(location: str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location ).content ,"""html.parser""" )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("""div""" ,attrs={"""data-tn-component""": """organicJob"""} ):
        job_title = job.find("""a""" ,attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
        company_name = job.find("""span""" ,{"""class""": """company"""} ).text.strip()
yield job_title, company_name
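# Note: this scrapes Indeed's rendered HTML, so the markup hooks above
# ("organicJob", "jobTitle", span.company) are assumptions about the page
# layout and will silently break whenever the site changes.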
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 607 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
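# The _LazyModule swap above defers the heavy torch/TF imports until an
# attribute is first accessed, so importing the package stays cheap even when
# the optional backends are installed.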
| 607 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
__lowerCAmelCase = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []
    def __init__(
        self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>",
        extra_ids=100, additional_special_tokens=None, **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token,
            pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , _lowerCamelCase , )
return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
        if not os.path.isdir(save_directory):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
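    # Resulting layout (T5 uses no BOS token and prefix_tokens is empty):
    #   single sequence: token_ids_0 </s>
    #   sequence pair:   token_ids_0 </s> token_ids_1 </s>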
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
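    # The sentinel tokens are the <extra_id_*> placeholders used by T5's
    # span-corruption pretraining objective; the two helpers above recover
    # them from additional_special_tokens via the regex.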
| 701 |
def topological_sort(graph):
    """Kahn's algorithm: repeatedly remove vertices of in-degree zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
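# For the adjacency list above a valid topological order is printed,
# e.g. [0, 1, 2, 3, 4, 5].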
| 129 | 0 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
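# idx_of_element maps each Node object to its current slot in the heap array,
# which is what lets decrease_key locate a node in O(1) before sifting it up.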
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
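# Example: for the input "a+b*c" infix_2_prefix returns "+a*bc".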
| 343 | 1 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"

        testargs = f"\n            run_eval_search.py\n            {model}\n            {input_file_name}\n            {output_file_name}\n            --score_path {score_path}\n            --task {task}\n            --num_beams 2\n            --length_penalty 2.0\n        ".split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))
    # test one model quickly (no-@slow) to catch simple problems, and do extensive
    # testing of functionality with multiple models as @slow separately
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra models should go into the list here - can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n            run_eval_search.py\n            {model}\n            {str(input_file_name)}\n            {str(output_file_name)}\n            --score_path {score_path}\n            --reference_path {reference_path}\n            --task {task}\n        ".split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 350 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSwaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, do_lower_case=False, remove_space=False, keep_accents=False, pad_token=None, unk_token=None, eos_token=None, bos_token=None, sp_model_kwargs=None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}  # NOTE: the upstream list has twelve distinct Unicode space/zero-width characters; plain spaces stand in here
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        """Strip non-printing characters, normalize whitespace, then apply NFC normalization."""
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text
    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to skip the default cleanup."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids
    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
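    # Example prompt layout produced by _build_conversation_input_ids for a
    # one-turn conversation (with the default special tokens):
    #   <|endoftext|><s>User: Hej<s>Bot: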
| 350 | 1 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors of num."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements are equal (an empty list counts as equal)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n unique prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first group of n consecutive integers with n unique prime factors each."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
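    # Expected output: 134043, the first of four consecutive integers that each
    # have four distinct prime factors (Project Euler 47).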
| 338 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPTaLMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowerCAmelCase_ = state_dict[F"{prefix}.{param_name}"]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowerCAmelCase_ = F"{prefix}.embeddings.{w}.weight"
lowerCAmelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
lowerCAmelCase_ = F"{prefix}.embeddings.LayerNorm.{w}"
lowerCAmelCase_ = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
                    compressed_sd[F"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        F"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[F"{prefix}.h.{std_idx}.attn.bias"] = state_dict[F"{prefix}.h.{teacher_idx}.attn.bias"]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
                    compressed_sd[F"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        F"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowerCAmelCase_ = state_dict[F"{layer}"]
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase_ = state_dict[F"lm_head.dense.{w}"]
lowerCAmelCase_ = state_dict[F"lm_head.layer_norm.{w}"]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowerCAmelCase_ = state_dict[F"{prefix}.ln_f.{w}"]
lowerCAmelCase_ = state_dict['''lm_head.weight''']
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
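    # The student checkpoint keeps 6 of the teacher's 12 layers (indices
    # 0, 2, 4, 7, 9, 11), the usual DistilBERT/DistilGPT2-style initialisation.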
| 338 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 713 |
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs."""
    return num1 ^ num2 < 0
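# different_signs(1, -1) -> True, different_signs(1, 1) -> False: the XOR of
# two ints is negative exactly when their sign bits differ.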
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
__magic_name__ = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def _lowerCAmelCase ( UpperCamelCase_ = "mumbai" ):
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
__SCREAMING_SNAKE_CASE = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
__SCREAMING_SNAKE_CASE = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 155 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0,
        decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu",
        d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5),
        conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
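# Example: the default config satisfies the consistency check above, i.e.
# len(Speech2TextConfig().conv_kernel_sizes) == Speech2TextConfig().num_conv_layers == 2.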
| 155 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCAmelCase = 42
lowerCAmelCase = 42
class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
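    # 0-1 BFS: zero-weight edges go to the front of the deque and unit-weight
    # edges to the back, so vertices are popped in nondecreasing distance
    # order, giving Dijkstra-like results in O(V + E).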
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(pred_label, label, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels: int, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 159 | 0 |