def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
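    # A quick sanity check on the divisor-count logic (added for illustration, not
    # part of the original solution): 28 = 2**2 * 7, so its divisor count is
    # (2 + 1) * (1 + 1) = 6 (the divisors are 1, 2, 4, 7, 14, 28).
    assert count_divisors(28) == 6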
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
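
# A minimal usage sketch (illustrative, not part of the original module): CsvConfig
# mirrors the pandas read_csv options, and pd_read_csv_kwargs exposes only the
# subset that the installed pandas version still accepts.
if __name__ == "__main__":
    demo_config = CsvConfig(sep=";", skiprows=1)
    print(demo_config.pd_read_csv_kwargs["sep"])  # ";"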
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
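
# How the lazy pattern behaves (illustrative note, not part of the original module):
# importing the package is cheap, and heavy submodules load on first attribute access.
#
#   import transformers.models.llama as llama  # fast: no torch model code executed yet
#   model_cls = llama.LlamaForCausalLM         # triggers the real import of modeling_llama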
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """The function helps in renaming embedding layer weights for the given stage index."""
    embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention(idx, cnt):
    """The function helps in renaming attention block layer weights for stage idx, block cnt."""
    attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    """The function helps in renaming the cls_token weights."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


def final():
    """The function helps in renaming the final classification layer weights."""
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Function to convert the Microsoft CvT checkpoint to a Hugging Face checkpoint."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help="Path to the original CvT checkpoint file.",
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowercase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
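
    # Example invocation (illustrative; the script filename and paths are placeholders):
    #
    #   python convert_cvt_checkpoint.py \
    #       --cvt_model cvt-13 \
    #       --image_size 384 \
    #       --cvt_file_name cvtmodels/CvT-13-384x384-IN-22k.pth \
    #       --pytorch_dump_folder_path ./cvt-13-converted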
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
snake_case = pd.read_csv("sample_data.csv", header=None)
snake_case = df.shape[:1][0]
# If you're using some other dataset input the target column
snake_case = df.iloc[:, 1:2]
snake_case = actual_data.values.reshape(len_data, 1)
snake_case = MinMaxScaler().fit_transform(actual_data)
snake_case = 10
snake_case = 5
snake_case = 20
snake_case = len_data - periods * look_back
snake_case = actual_data[:division]
snake_case = actual_data[division - look_back :]
snake_case, snake_case = [], []
snake_case, snake_case = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
snake_case = np.array(train_x)
snake_case = np.array(test_x)
snake_case = np.array([list(i.ravel()) for i in train_y])
snake_case = np.array([list(i.ravel()) for i in test_y])
snake_case = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
snake_case = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
snake_case = model.predict(x_test)
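    # Illustrative follow-up (not in the original script): score the forecast against
    # the held-out windows with a simple RMSE on the scaled values.
    rmse = float(np.sqrt(np.mean((pred - y_test) ** 2)))
    print(f"Test RMSE (scaled units): {rmse}")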
from ..utils import is_flax_available, is_torch_available

if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
"""simple docstring"""
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return the given decimal as a (numerator, denominator) fraction in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce by the greatest common divisor, found with the Euclidean algorithm.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(8_9.0) = }''')
print(f'''{decimal_to_fraction('67') = }''')
print(f'''{decimal_to_fraction('45.0') = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction('6.25') = }''')
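    # Worked example (added for illustration): 6.25 has two fractional digits, so the
    # raw ratio is 625/100; the Euclidean loop reduces by gcd(625, 100) = 25 to 25/4.
    assert decimal_to_fraction(6.25) == (25, 4)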
print(f'''{decimal_to_fraction('78td') = }''')
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Processes audio and text inputs, as well as audio and text targets."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Collates audio and text inputs, as well as their targets, into a padded batch."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily pad log-mel targets as if each mel bin were one feature.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
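
# Typical usage sketch (illustrative; "microsoft/speecht5_tts" is a public checkpoint):
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")
#   # For TTS training, pair text inputs with audio targets:
#   # inputs = processor(text="Hello", audio_target=waveform, sampling_rate=16000, return_tensors="pt")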
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as ViT does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# Dummy Flax objects that raise an informative error when the flax backend is missing.
class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    # the custom pre-tokenizer cannot be serialized and reloaded
    def test_save_slow_from_fast_and_reload_fast(self):
        pass
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player in a minimax game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
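    # Worked example (added for illustration): for the 8 leaves above the tree height
    # is log2(8) = 3. Bottom-up: max pairs -> [90, 33, 65, 34423], min pairs ->
    # [33, 65], and the maximiser at the root picks 65.
    assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34_423], 3) == 65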
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    """Rewrite a single OpenAI Jukebox state-dict key into the Hugging Face naming scheme."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")
    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
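
# Illustrative spot-check of the key rewriting above (not part of the original script):
# the prior head "prior.x_out" is mapped onto the Hugging Face name "fc_proj_out".
assert replace_key("prior.x_out.weight") == "prior.fc_proj_out.weight"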
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original OpenAI weights and convert them to the Hugging Face Jukebox layout."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
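
    # Example invocation (illustrative; assumes this script is saved as convert_jukebox.py):
    #
    #   python convert_jukebox.py \
    #       --model_name jukebox-1b-lyrics \
    #       --pytorch_dump_folder_path jukebox-1b-lyrics-converted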
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase__ ( lowerCamelCase_ : int):
'''simple docstring'''
def is_in_circle(lowerCamelCase_ : float ,lowerCamelCase_ : float) -> bool:
lowerCAmelCase__ : Any = sqrt((x**2) + (y**2))
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 ,1.0) ,uniform(-1.0 ,1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
print(f"""The estimated value of pi is {pi_estimate}""")
print(f"""The numpy value of pi is {pi}""")
print(f"""The total error is {abs(pi - pi_estimate)}""")
def area_under_curve_estimator ( iterations : int ,function_to_integrate : Callable[[float], float] ,min_value : float = 0.0 ,max_value : float = 1.0 ,):
    '''simple docstring'''
    return mean(
        function_to_integrate(uniform(min_value ,max_value)) for _ in range(iterations)) * (max_value - min_value)
def area_under_line_estimator_check ( iterations : int ,min_value : float = 0.0 ,max_value : float = 1.0):
    '''simple docstring'''
    def identity_function(x : float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations ,identity_function ,min_value ,max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
print('''******************''')
print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
print(f"""Estimated value is {estimated_value}""")
print(f"""Expected value is {expected_value}""")
print(f"""Total error is {abs(estimated_value - expected_value)}""")
print('''******************''')
def pi_estimator_using_area_under_curve ( iterations : int):
    '''simple docstring'''
    def function_to_integrate(x : float) -> float:
        return sqrt(4.0 - x * x)
    estimated_value = area_under_curve_estimator(
        iterations ,function_to_integrate ,0.0 ,2.0)
print('''******************''')
print('''Estimating pi using area_under_curve_estimator''')
print(f"""Estimated value is {estimated_value}""")
print(f"""Expected value is {pi}""")
print(f"""Total error is {abs(estimated_value - pi)}""")
print('''******************''')
if __name__ == "__main__":
import doctest
doctest.testmod()
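# Usage sketch (sample counts are illustrative; Monte Carlo estimates vary run to run):
#   pi_estimator(100_000)                                           # prints an estimate close to math.pi
#   area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 1.0)  # ~= 1/3, the integral of x**2 over [0, 1]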
| 703 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Any =logging.get_logger(__name__)
class EncoderDecoderConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """encoder-decoder"""
    is_composition = True
    def __init__(self ,**kwargs ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(**kwargs )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type ,**encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type ,**decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs (cls ,encoder_config ,decoder_config ,**kwargs ) -> PretrainedConfig:
        """simple docstring"""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**kwargs )
    def to_dict (self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
return output
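# Usage sketch (mirrors transformers' EncoderDecoderConfig; the two sub-configs are illustrative):
#   config = EncoderDecoderConfig(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict())
#   config.to_dict()["model_type"]  # 'encoder-decoder'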
| 90 | 0 |
"""simple docstring"""
import re
def __A ( dna :str) -> str:
    if len(re.findall('''[ATCG]''' , dna)) != len(dna):
        raise ValueError('''Invalid Strand''')
    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC'''))
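# Quick check (illustrative REPL session; A<->T and C<->G are swapped):
#   >>> __A("GCTA")
#   'CGAT'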
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 52 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector :
    def __init__( self ,components = None ):
        if components is None:
            components = []
        self.__components = list(components )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(A ,self.__components ) ) + ")"
    def __add__( self ,other ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception("""must have the same size""" )
    def __sub__( self ,other ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else: # error case
            raise Exception("""must have the same size""" )
@overload
def __mul__( self ,A ):
...
@overload
def __mul__( self ,A ):
...
    def __mul__( self ,other ):
        if isinstance(other ,(float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other ,Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else: # error case
            raise Exception("""invalid operand!""" )
    def copy( self ):
        return Vector(self.__components )
    def component( self ,i ):
        if isinstance(i ,int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception("""index out of range""" )
    def change_component( self ,pos ,value ):
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self ):
        if len(self.__components ) == 0:
            raise Exception("""Vector is empty""" )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self ,other ,deg = False ):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector ( dimension ):
    """simple docstring"""
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector ( dimension , pos ):
    """simple docstring"""
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy ( scalar , x , y ):
    """simple docstring"""
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def random_vector ( n , a , b ):
    """simple docstring"""
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
class Matrix :
    def __init__( self ,matrix ,w ,h ):
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self ):
UpperCAmelCase = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__( self ,other ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i ,j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix ,self.__width ,self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )
    def __sub__( self ,other ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i ,j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix ,self.__width ,self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self ,A ):
...
@overload
def __mul__( self ,A ):
...
    def __mul__( self ,other ):
        if isinstance(other ,Vector ): # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    prods = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i ,sum(prods ) )
                return ans
            else:
                raise Exception(
                    """vector must have the same size as the """
                    """number of columns of the matrix!""" )
        elif isinstance(other ,(int, float) ): # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix ,self.__width ,self.__height )
        return None
    def height( self ):
        return self.__height
    def width( self ):
        return self.__width
    def component( self ,x ,y ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("""change_component: indices out of bounds""" )
    def change_component( self ,x ,y ,value ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("""change_component: indices out of bounds""" )
    def minor( self ,x ,y ):
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor ,self.__width - 1 ,self.__height - 1 ).determinant()
    def cofactor( self ,x ,y ):
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x ,y )
        else:
            raise Exception("""Indices out of bounds""" )
    def determinant( self ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 ,y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
def square_zero_matrix ( n ):
    """simple docstring"""
    ans = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def random_matrix ( width , height , a , b ):
    """simple docstring"""
    random.seed(None )
    matrix = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
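# Minimal usage sketch (values are illustrative):
#   v, w = Vector([1, 2, 3]), Vector([4, 5, 6])
#   str(v + w)                                    # '(5,7,9)'
#   v * w                                         # 32, the dot product
#   Matrix([[1, 2], [3, 4]], 2, 2).determinant()  # -2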
| 341 | 0 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter ( logging.LoggerAdapter ):
    @staticmethod
    def _should_log ( main_process_only ):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log ( self , level , msg , *args , **kwargs ):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
        main_process_only = kwargs.pop("""main_process_only""" , True )
        in_order = kwargs.pop("""in_order""" , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def get_logger (name :str , log_level :str = None ):
    """simple docstring"""
    if log_level is None:
        log_level = os.environ.get("""ACCELERATE_LOG_LEVEL""" , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
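# Usage sketch (mirrors accelerate.logging.get_logger; the messages are illustrative):
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("logged once, on the main process only")
#   logger.info("logged by every rank, in order", main_process_only=False, in_order=True)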
| 710 |
import argparse
import datetime
def zeller (date_input :str ):
    """simple docstring"""
    days = {
"""0""": """Sunday""",
"""1""": """Monday""",
"""2""": """Tuesday""",
"""3""": """Wednesday""",
"""4""": """Thursday""",
"""5""": """Friday""",
"""6""": """Saturday""",
}
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
    if not 0 < len(date_input ) < 11:
raise ValueError("""Must be 10 characters long""" )
# Get month
    m = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("""Month must be between 1 - 12""" )
    sep_a = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get day
    d = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("""Date must be between 1 - 31""" )
# Get second separator
    sep_a = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"""Year out of range. There has to be some sort of limit...right?""" )
# Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
# Start math
if m <= 2:
        y = y - 1
        m = m + 12
# maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
# Response
UpperCamelCase__ = f"""Your date {date_input}, is a {days[str(a__ )]}!"""
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
UpperCamelCase__ = parser.parse_args()
zeller(args.date_input)
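# Worked example for "01-31-2010": m=1 -> 13, y -> 2009, c=20, k=9, t=int(2.6*13-5.39)=28,
# u=5, v=2, x=40, z=75, w=75-2*20=35, f=35%7=0 -> "Sunday", which datetime confirms.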
| 548 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp ( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
    def test_multi_gpu ( self ):
        print(F'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
    def test_multi_gpu_ops ( self ):
        print(F'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
        print(F'''Command: {cmd}''' )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
    def test_pad_across_processes ( self ):
        cmd = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
    def test_distributed_data_loop ( self ):
        print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
        cmd = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ''''''
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 84 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter ( logging.LoggerAdapter ):
    """simple docstring"""
    @staticmethod
    def _should_log ( main_process_only ):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log ( self , level , msg , *args , **kwargs ):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
        main_process_only = kwargs.pop('''main_process_only''' , True )
        in_order = kwargs.pop('''in_order''' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def get_logger ( name , log_level = None ):
    if log_level is None:
        log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
| 194 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Dict = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch ( bort_checkpoint_path : str , pytorch_dump_folder_path : str ):
"""simple docstring"""
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
    data_dir = os.path.join(get_home_dir() , '''models''' )
    vocab = _load_vocab(vocab_name , None , data_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(vocab ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
UpperCamelCase__ : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
UpperCamelCase__ : Dict = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
UpperCamelCase__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
UpperCamelCase__ : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
UpperCamelCase__ : int = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
UpperCamelCase__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
UpperCamelCase__ : BertSelfAttention = layer.attention.self
UpperCamelCase__ : int = check_and_map_params(
self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
UpperCamelCase__ : Optional[Any] = check_and_map_params(
self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
UpperCamelCase__ : Tuple = check_and_map_params(
self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
UpperCamelCase__ : str = check_and_map_params(
self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
UpperCamelCase__ : List[str] = check_and_map_params(
self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
UpperCamelCase__ : Dict = check_and_map_params(
self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
# self attention output
UpperCamelCase__ : BertSelfOutput = layer.attention.output
UpperCamelCase__ : Any = check_and_map_params(
self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
UpperCamelCase__ : Optional[int] = check_and_map_params(
self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
UpperCamelCase__ : List[str] = check_and_map_params(
self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
UpperCamelCase__ : Dict = check_and_map_params(
self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )
# intermediate
UpperCamelCase__ : BertIntermediate = layer.intermediate
UpperCamelCase__ : Optional[int] = check_and_map_params(
intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
UpperCamelCase__ : Tuple = check_and_map_params(
intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
# output
UpperCamelCase__ : BertOutput = layer.output
UpperCamelCase__ : str = check_and_map_params(
bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
UpperCamelCase__ : Any = check_and_map_params(
bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
UpperCamelCase__ : List[Any] = check_and_map_params(
bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
UpperCamelCase__ : List[str] = check_and_map_params(
bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['''input_ids''']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='''pt''' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
if success:
print('''✔️ Both model do output the same tensors''' )
else:
print('''❌ Both model do **NOT** output the same tensors''' )
        print('''Absolute difference is:''' , max_absolute_diff )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
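# Hypothetical invocation (the script file name and paths are assumptions):
#   python convert_bort_checkpoint.py --bort_checkpoint_path ./bort.params --pytorch_dump_folder_path ./bort-pt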
| 106 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
__UpperCamelCase : int = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def _a ( SCREAMING_SNAKE_CASE : str = "mumbai" ):
"""simple docstring"""
UpperCamelCase__ : Optional[int] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
UpperCamelCase__ : Optional[Any] = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
UpperCamelCase__ : Union[str, Any] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 106 | 1 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config ( model_name ):
'''simple docstring'''
    config = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__lowerCamelCase = 128
elif "12-12" in model_name:
__lowerCamelCase = 12
__lowerCamelCase = 12
elif "14-14" in model_name:
__lowerCamelCase = 14
__lowerCamelCase = 14
elif "16-16" in model_name:
__lowerCamelCase = 16
__lowerCamelCase = 16
else:
raise ValueError("""Model not supported""" )
__lowerCamelCase = "huggingface/label-files"
if "speech-commands" in model_name:
__lowerCamelCase = 35
__lowerCamelCase = "speech-commands-v2-id2label.json"
else:
__lowerCamelCase = 527
__lowerCamelCase = "audioset-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config
def rename_key ( name ):
'''simple docstring'''
if "module.v" in name:
__lowerCamelCase = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
__lowerCamelCase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
__lowerCamelCase = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__lowerCamelCase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__lowerCamelCase = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
__lowerCamelCase = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
__lowerCamelCase = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def convert_state_dict ( orig_state_dict , config ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[dim : dim * 2, :]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = val[:dim]
__lowerCamelCase = val[dim : dim * 2]
__lowerCamelCase = val[-dim:]
else:
__lowerCamelCase = val
return orig_state_dict
def remove_keys ( state_dict ):
'''simple docstring'''
    ignore_keys = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint ( model_name , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
# remove some keys
    remove_keys(state_dict )
# rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
# load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.267_7393 if "speech-commands" not in model_name else -6.84_5978
    std = 4.568_9974 if "speech-commands" not in model_name else 5.565_4526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
if "speech-commands" in model_name:
        dataset = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
        waveform , _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=16000 , return_tensors="""pt""" )
# forward pass
    outputs = model(**inputs )
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
    if not torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_a : Optional[int] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
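# Hypothetical invocation (the script file name is an assumption; defaults are shown in the parser above):
#   python convert_ast_checkpoint.py --model_name ast-finetuned-audioset-10-10-0.4593 --push_to_hub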
| 479 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester :
    """simple docstring"""
    def __init__(self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ) -> List[Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
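        # Worked example with the defaults above: (30 // 2) ** 2 = 225 patches, and
        # int(ceil((1 - 0.6) * (225 + 1))) = ceil(90.4) = 91 tokens survive masking.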
    def prepare_config_and_inputs (self ) -> str:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config (self ) -> Optional[int]:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model (self , config , pixel_values , labels ) -> List[str]:
        '''simple docstring'''
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining (self , config , pixel_values , labels ) -> Dict:
        '''simple docstring'''
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common (self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp (self ) -> Dict:
        '''simple docstring'''
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
    def test_config (self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def UpperCAmelCase__ (self: str ) -> Optional[Any]:
'''simple docstring'''
pass
    def test_model_common_attributes (self ) -> str:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature (self ) -> Tuple:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model (self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining (self ) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def check_pt_tf_models (self , tf_model , pt_model , pt_inputs_dict ) -> Dict:
        '''simple docstring'''
        np.random.seed(2 )
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        pt_noise = torch.from_numpy(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model , pt_model , pt_inputs_dict )
    def test_save_load (self ) -> Dict:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
# make random mask reproducible
torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            out_a = outputs[0].cpu().numpy()
            out_a[np.isnan(out_a )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                # Make sure we don't have nans
                out_b = after_outputs[0].cpu().numpy()
                out_b[np.isnan(out_b )] = 0
                max_diff = np.amax(np.abs(out_a - out_b ) )
                self.assertLessEqual(max_diff , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCAmelCase__ (self: Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCAmelCase__ (self: Optional[int] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCAmelCase__ (self: Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def UpperCAmelCase__ (self: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCAmelCase__ (self: List[str] ) -> List[Any]:
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained (self ) -> int:
        '''simple docstring'''
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img () -> Optional[Any]:
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor (self ) -> List[str]:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining (self ) -> Union[str, Any]:
'''simple docstring'''
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1E-4 ) )
| 351 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Union[str, Any] =logging.get_logger(__name__)
A_ : Optional[Any] ={
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class AlignTextConfig ( PretrainedConfig ):
    model_type = """align_text_model"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
@classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('model_type' ) == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class AlignVisionConfig ( PretrainedConfig ):
    model_type = """align_vision_model"""
    def __init__( self , num_channels = 3 , image_size = 6_00 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 1_12, 1_92] , out_channels = [16, 24, 40, 80, 1_12, 1_92, 3_20] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 25_60 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , drop_connect_rate = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
@classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('model_type' ) == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True  # reconstructed name; the original boolean class attribute was obfuscated

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
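# Usage sketch (added; not part of the original file) - composing the three configs:
#   config = AlignConfig()
#   config.text_config.hidden_size           # 768
#   config.vision_config.num_hidden_layers   # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64
#   config.to_dict()["model_type"]           # "align"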
| 721 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : float )-> float:
if edge <= 0 or not isinstance(snake_case , snake_case ):
raise ValueError('Length must be a positive.' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def SCREAMING_SNAKE_CASE_ ( snake_case : float )-> float:
if edge <= 0 or not isinstance(snake_case , snake_case ):
raise ValueError('Length must be a positive.' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
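    # Added sanity check (not in the original): for edge == 1 the closed forms give
    # surface area ~ 20.6457 and volume ~ 7.6631.
    print(f"surface area(1) = {dodecahedron_surface_area(1):.4f}")
    print(f"volume(1) = {dodecahedron_volume(1):.4f}")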
| 222 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def __lowerCamelCase ( a_ : Optional[int] , a_ : Tuple , a_ : Union[str, Any] , a_ : Union[str, Any] ) -> int:
__SCREAMING_SNAKE_CASE :int = sorted(zip(a_ , a_ ) , key=lambda a_ : x[0] / x[1] , reverse=a_ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[Any] = [i[0] for i in r], [i[1] for i in r]
__SCREAMING_SNAKE_CASE :Union[str, Any] = list(accumulate(a_ ) )
__SCREAMING_SNAKE_CASE :Dict = bisect(a_ , a_ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
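# Worked example (added for illustration; values and weights are made up):
# values [60, 100, 120], weights [10, 20, 30], capacity 50 -> take items 0 and 1
# whole plus 20/30 of item 2: 60 + 100 + 120 * 20 / 30 == 240.0, so
# frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0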
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 498 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
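# Illustrative check (added): a ParlAI encoder key maps onto the HF Blenderbot layout:
# rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
#   -> "encoder.layers.0.self_attn.q_proj.weight"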
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the HF format."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(mapping)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 648 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # The two flag names below are reconstructed; the originals were obfuscated.
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 648 | 1 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number",
    )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
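# Example invocation (added for illustration; file paths are placeholders):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/dev.questions \
#       --gold_data_path path/to/dev.gold \
#       --predictions_path preds.txt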
| 18 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
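# Usage sketch (added, not part of the original file):
#   config = DeiTConfig()
#   onnx_config = DeiTOnnxConfig(config)  # OnnxConfig is constructed from the model config
#   dict(onnx_config.inputs)  # {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}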
| 18 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 329 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # NOTE: the obfuscated source referenced module-level LORA_PREFIX_* constants that were
    # never defined; the function parameters are used here so the function is self-contained.
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
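# Added sketch (not in the original script): the core update above is
# W <- W + alpha * (up @ down). For a rank-r pair with shapes (out, r) and
# (r, in), the merged delta has the full (out, in) shape, e.g.:
#   up = torch.randn(320, 4); down = torch.randn(4, 768)
#   delta = 0.75 * torch.mm(up, down)  # shape (320, 768)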
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 392 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
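# Usage sketch (added, not in the original): a concrete test case mixes this in, e.g.
#   class SpeechToTextToolTester(ToolTesterMixin, unittest.TestCase):
#       def setUp(self):
#           self.tool = load_tool(...)  # hypothetical setup that assigns self.tool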
| 392 | 1 |
def solution(length: int = 50) -> int:
    """Project Euler 116: count the ways to replace square tiles in a row of
    the given length with coloured tiles of length 2 (red), 3 (green) or 4
    (blue), using at least one coloured tile and never mixing colours."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
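# Added cross-check (not in the original): a direct recurrence for a single tile
# size m, f(n) = f(n-1) + f(n-m) + 1 with f(n < m) = 0, should agree per colour;
# e.g. for length 5 it gives 7 (red) + 3 (green) + 2 (blue) = 12.
def _ways_single_colour(n: int, m: int) -> int:
    f = [0] * (n + 1)
    for i in range(m, n + 1):
        f[i] = f[i - 1] + f[i - m] + 1
    return f[n]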
if __name__ == "__main__":
    print(f"{solution() = }")
| 373 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 373 | 1 |
class MaxFenwickTree:
    """Fenwick-style tree answering range maximum queries over a fixed-size array."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers exactly this index.
                self.tree[index] = value
            else:
                # The node covers [current_left_border, index]; fold the new value in.
                # (Reconstructed line - the original call was obfuscated; this form is
                # correct for non-decreasing updates.)
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum of arr[left:right] (right exclusive)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
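def _demo() -> None:
    # Added usage sketch (not in the original file).
    tree = MaxFenwickTree(8)
    for i, v in enumerate([2, 7, 1, 9, 4, 4, 8, 3]):
        tree.update(i, v)
    assert tree.query(0, 8) == 9  # global max
    assert tree.query(4, 7) == 8  # max of arr[4:7] == max(4, 4, 8)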
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Original GELU, computed with the Gaussian error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """The smoother tanh approximation of GELU."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """Clip the range of possible GELU outputs to [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: splits ``x`` in two halves and gates one with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
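# Usage sketch (added, not in the original file):
#   act = get_tf_activation("gelu_new")
#   act(tf.constant([0.5]))  # ~0.3457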
| 21 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_FILE = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 698 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
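# A minimal usage sketch: after `accelerate config` has written a config file, the
# command below runs the bundled sanity-check script on the current machine. The
# config path is a hypothetical example.
#
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml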
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
UpperCAmelCase = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
# Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file."
    )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file."
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup."
    )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone

    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
print("""Successfully setup pod.""" )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
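# A usage sketch mirroring what the launcher assembles for gcloud. The TPU name and
# zone below are placeholders; --debug prints the gcloud command instead of running it:
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate --debug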
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""")
def UpperCamelCase_ ( self) -> Optional[int]:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""")
def UpperCamelCase_ ( self) -> Tuple:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""")
def UpperCamelCase_ ( self) -> List[Any]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""")
def UpperCamelCase_ ( self) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""")
def UpperCamelCase_ ( self) -> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def UpperCamelCase_ ( self) -> Optional[int]:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
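# To run only this module's tests (standard pytest invocation; the path assumes the
# usual transformers repository layout):
#
#   pytest tests/models/mask2former/test_modeling_mask2former.py -k "integration"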
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k, v = copy.deepcopy(k), copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def snake_case__ ( a , a="," ) -> int:
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
with open(_lowerCamelCase ) as f:
snake_case__ = eval(f.read() )
else:
snake_case__ = requests.get(_lowerCamelCase )
try:
snake_case__ = requests.json()
except Exception:
snake_case__ = req.content.decode()
assert data is not None, "could not connect"
try:
snake_case__ = eval(_lowerCamelCase )
except Exception:
snake_case__ = data.split("""\n""" )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
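# A small usage sketch for the helpers above. The image URL is a placeholder; the
# archive call mirrors cached_path's extraction behavior:
#
#   img = img_tensorize("https://example.com/cats.jpg")  # RGB ndarray (downloads if not a local path)
#   local = cached_path("https://example.com/archive.zip", extract_compressed_file=True)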
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)

    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
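# Typical invocation (the run id and token are placeholders):
#
#   python extract_warnings.py --workflow_run_id 123456789 --output_dir ./warnings \
#       --token $GITHUB_TOKEN --targets DeprecationWarning,UserWarning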
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)
    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """
        encrypt_key is an NxN numpy array
        """
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
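# A quick usage sketch for HillCipher. The 2x2 key below is just an example whose
# determinant (7) is coprime with 36; note that process_text pads the plaintext by
# repeating its last character up to a multiple of the key size:
#
#   cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   ciphertext = cipher.encrypt("testing hill cipher")
#   assert cipher.decrypt(ciphertext).startswith("TESTINGHILLCIPHER")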
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated amount of each resource across all processes"""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Available resources: the claim vector minus what is already allocated"""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process needs: maximum claims minus allocated resources"""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Index control dictionary to recover the original index of each process"""
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm over the given tables"""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
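# A usage sketch with the sample tables above; main prints the execution order and
# the evolving resource stack, and describe=True also dumps the input tables:
#
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)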
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
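# A quick sanity check of the defaults: the ratio is the product of the conv strides,
# i.e. 5 * 2**6 = 320 input samples per output frame.
#
#   config = UniSpeechConfig()
#   assert config.inputs_to_logits_ratio == 320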
def text_justification(word: str, max_width: int) -> list:
    """
    Format the input string such that each line has exactly max_width characters
    and is fully (left and right) justified; return the list of justified lines.
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
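    # A quick demonstration of the function above: each returned line is
    # padded to exactly max_width characters, with extra spaces distributed
    # from the left.
    assert text_justification("This is an example of text justification.", 16) == [
        "This    is    an",
        "example  of text",
        "justification.  ",
    ]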
| 70 | 1 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
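
# An illustrative invocation of the script above (all file paths and the
# dump directory are hypothetical placeholders; every flag is one defined by
# the argument parser in main(), and the alpha choices satisfy sanity_checks):
#
#   python train.py \
#       --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert \
#       --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --dump_path serialization_dir/my_first_distillation \
#       --force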
| 281 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
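
# A minimal sketch of the deprecation path above: constructing the legacy
# class emits the FutureWarning once, and MobileViTImageProcessor is the
# drop-in replacement (this module uses relative imports, so the sketch is
# shown as comments rather than a __main__ block):
#
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       MobileViTFeatureExtractor()
#   assert any(issubclass(w.category, FutureWarning) for w in caught)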
| 571 | 0 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setUp(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args) | 128 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    """simple docstring"""

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)
    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename) | 128 | 1 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')

DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
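
# A small, runnable illustration of the helpers exercised above: find_backend
# reads one line of an __init__.py and returns the backend string guarding
# it, and create_dummy_object renders the placeholder for a single object.
# Both behaviors match the assertions in the tests.
if __name__ == "__main__":
    print(find_backend("    if not is_torch_available():"))  # -> "torch"
    print(create_dummy_object("CONSTANT", "'torch'"))  # prints "CONSTANT = None"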
| 527 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input')
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
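    # A worked example for the sliding-window function above: the size-4
    # windows over [1, 4, 2, 10, 23, 3, 1, 0, 20] sum to 17, 39, 38, 37, 27
    # and 24, so the maximum is 39.
    assert max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39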
| 527 | 1 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
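    # Sanity checks for the helpers above; solution() without arguments
    # returns the 10001st prime, the well-known Project Euler problem 7
    # answer.
    assert is_prime(97) and not is_prime(1)
    assert solution(6) == 13  # the first six primes end at 13
    assert solution() == 104743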
| 701 | import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
lowerCAmelCase__ = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
lowerCAmelCase__ = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_0_0_0_0):
out_file.write(data)
lowerCAmelCase__ = BeautifulSoup(res.text, "html.parser")
lowerCAmelCase__ = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F'''https://google.com{link.get('href')}''')
| 594 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
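
# A minimal usage sketch for the configuration above (the overridden value
# is arbitrary and every other field keeps its default; shown as comments
# because this module uses relative imports):
#
#   config = RetriBertConfig(projection_dim=64)
#   config.model_type      # "retribert"
#   config.projection_dim  # 64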
| 662 |
def get_data(source_data: list) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data | 352 | 0 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a : Any = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a : int = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a : List[str] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    """simple docstring"""

    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[str],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
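
# Example 1 from the docstring above, spelled out as a runnable snippet
# (requires nltk; loads this metric through the datasets library):
if __name__ == "__main__":
    hyp1 = "It is a guide to action which ensures that the rubber duck always disobeys the commands of the cat".split()
    ref1a = "It is the guiding principle which guarantees the rubber duck forces never being under the command of the cat".split()
    hyp2 = "he read the book because he was interested in world history".split()
    ref2a = "he was interested in world history because he read the book".split()
    google_bleu = datasets.load_metric("google_bleu")
    results = google_bleu.compute(predictions=[hyp1, hyp2], references=[[ref1a], [ref2a]])
    print(round(results["google_bleu"], 2))  # 0.44 per the docstring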
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 609 | 0 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 344 |
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1055.05585,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    '''simple docstring'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
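    # Worked examples for the converter above: 1 kilowatt-hour is exactly
    # 3_600_000 joules, and 1_000 joules are 1 kilojoule.
    assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000
    assert energy_conversion("joule", "kilojoule", 1_000) == 1.0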
| 344 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
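
# How the __main__ block above is meant to be exercised: test_pad_across_processes
# launches this very module under torchrun, which on a 2-GPU machine is roughly
# equivalent to the following (the file name depends on where this module is
# saved, so treat it as a placeholder):
#
#   torchrun --nproc_per_node=2 test_multigpu.py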
| 185 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCamelCase__ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : List[str] = FlaxAutoencoderKL
@property
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = 4
UpperCamelCase__ = 3
UpperCamelCase__ = (32, 32)
UpperCamelCase__ = jax.random.PRNGKey(0 )
UpperCamelCase__ = jax.random.uniform(snake_case , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
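

# A small sketch of instantiating the model under test with the exact init
# kwargs prepared above (runs only when jax/flax are installed):
if __name__ == "__main__":
    model = FlaxAutoencoderKL(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=4,
    )
    print(model)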
| 185 | 1 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class lowerCAmelCase__ :
A_ : str = field(
metadata={'help': 'The output directory where the model will be written.'} , )
A_ : str = field(
metadata={
'help': (
'The encoder model checkpoint for weights initialization.'
'Don\'t set if you want to train an encoder model from scratch.'
)
} , )
A_ : str = field(
metadata={
'help': (
'The decoder model checkpoint for weights initialization.'
'Don\'t set if you want to train a decoder model from scratch.'
)
} , )
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    main() | 106 | '''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    '''simple docstring'''
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    '''simple docstring'''
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    '''simple docstring'''
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 251 | 0 |
""" MRA model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
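
# Usage sketch (assumes a transformers release that ships MRA; illustrative only):
#   from transformers import MraConfig, MraModel
#   config = MraConfig(num_hidden_layers=4, block_per_row=2)
#   model = MraModel(config)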
| 136 |
""" AutoFeatureExtractor class."""

import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the feature extractor configuration of a pretrained checkpoint as a plain dict."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
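
# Usage sketch (checkpoint name illustrative; requires network access):
#   from transformers import AutoFeatureExtractor
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")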
| 136 | 1 |
"""
Edit (Levenshtein) distance between two strings, computed both top-down with
memoization and bottom-up with tabulation.
"""


class EditDistance:
    """
    Use:
        solver = EditDistance()
        edit_distance = solver.min_dist_top_down(first_string, second_string)
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 71 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A_ : Optional[Any] = logging.getLogger(__name__)
@dataclass
class lowerCamelCase (A__ ):
lowerCamelCase__ : Optional[float] = field(
default=0.0 ,metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
lowerCamelCase__ : bool = field(default=A__ ,metadata={'help': 'Whether to SortishSamler or not.'} )
lowerCamelCase__ : bool = field(
default=A__ ,metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : bool = field(default=A__ ,metadata={'help': 'whether to use adafactor'} )
lowerCamelCase__ : Optional[float] = field(
default=A__ ,metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[float] = field(
default=A__ ,metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[float] = field(default=A__ ,metadata={'help': 'Dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[float] = field(
default=A__ ,metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[str] = field(
default='linear' ,metadata={'help': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} ,)
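
# Usage sketch (values illustrative):
#   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, label_smoothing=0.1)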
| 196 | 0 |
import inspect
import unittest
from math import floor

from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 449 |
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
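
# Usage sketch outside the test suite (model id illustrative; requires network access):
#   from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)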
| 449 | 1 |
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in list. Returns -1 if element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 362 |
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 107 | 0 |
"""simple docstring"""
SCREAMING_SNAKE_CASE = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> str:
"""simple docstring"""
assert len(str(__UpperCamelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
UpperCamelCase = year // 1_00
UpperCamelCase = (5 * (century % 4) + 2) % 7
UpperCamelCase = year % 1_00
UpperCamelCase = centurian % 12
UpperCamelCase = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
UpperCamelCase = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0)
else DOOMSDAY_LEAP[month - 1]
)
UpperCamelCase = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
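    # Worked example: 2020-10-24 falls on a Saturday.
    print(get_week_day(2020, 10, 24))  # -> Saturday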
| 711 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : Union[List[PIL.Image.Image], np.ndarray]
UpperCamelCase_ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : np.ndarray
UpperCamelCase_ : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
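
# Usage sketch (model id illustrative; requires the torch + transformers extras):
#   from diffusers import StableDiffusionPipeline
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   image = pipe("an astronaut riding a horse").images[0]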
| 556 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once `input_ids` reaches `max_length` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: prefer `MaxLengthCriteria(max_length=start_length + max_new_tokens)`."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once more than `max_time` seconds have elapsed."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
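
# Usage sketch (`model` and `inputs` are illustrative placeholders):
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=32), MaxTimeCriteria(max_time=5.0)])
#   model.generate(**inputs, stopping_criteria=criteria)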
| 36 |
def min_path_sum(grid: list) -> int:
    """Return the minimum cost of a top-left to bottom-right path through `grid`."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Accumulate minimal path costs into `current_row`, given the completed row above."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
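    # Worked example: the cheapest path 1 -> 3 -> 1 -> 1 -> 1 through this grid costs 7.
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # -> 7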
| 455 | 0 |
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
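
# Usage sketch (checkpoint name illustrative; requires network access):
#   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tokenizer("hello world")["input_ids"]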
| 707 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Account for the Earth's oblateness (WGS84) by converting geodetic
    # latitudes to reduced latitudes before applying the haversine formula.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
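
# Usage sketch (coordinates are illustrative lat/lon pairs in degrees; the
# result is in meters because RADIUS is expressed in meters):
if __name__ == "__main__":
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.1f} km")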
| 474 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
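
# Sketch of the fairseq/SentencePiece id alignment described above (the model
# path is hypothetical; any XLM-R compatible SentencePiece model works):
if __name__ == "__main__":
    tok = XLMRobertaTokenizer("sentencepiece.bpe.model")
    print(tok._convert_token_to_id("<s>"))    # 0 (fairseq special-token slot)
    print(tok._convert_token_to_id("<unk>"))  # 3
    # Ordinary pieces are shifted by fairseq_offset = 1 relative to raw spm ids.
    print(tok._convert_token_to_id("▁the") == tok.sp_model.PieceToId("▁the") + tok.fairseq_offset)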
| 187 |
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    """simple docstring"""

    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
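
# Hand-check of the docstring example using jiwer directly: the iterative branch
# sums per-pair error and reference-word counts, while concatenate_texts=True
# would compute one global alignment instead.
if __name__ == "__main__":
    preds = ["this is the prediction", "there is an other sample"]
    refs = ["this is the reference", "there is another one"]
    incorrect = total = 0
    for pred, ref in zip(preds, refs):
        m = compute_measures(ref, pred)
        incorrect += m["substitutions"] + m["deletions"] + m["insertions"]
        total += m["substitutions"] + m["deletions"] + m["hits"]
    print(incorrect / total)  # 0.5, matching the docstring example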
| 187 | 1 |
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
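
# This is the classic "large sum" task: num.txt is expected to hold one large
# integer per line. Throwaway sanity check (sample values are illustrative):
# with open("num.txt", "w") as f:
#     f.write("37107287533902102798797998220837590246510135740250\n")
#     f.write("46376937677490009712648124896970078050417018260538\n")
# print(solution())  # first ten digits of the column sum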
| 146 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f'''funnel-transformer/{name}''': {"""do_lower_case""": True} for name in _model_names}


class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)
        # Keep the backend normalizer consistent with the arguments passed at init time.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
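
# Funnel is unusual: the <cls> token gets its own token type id
# (cls_token_type_id = 2) instead of sharing type 0 with the first segment.
# Sketch (assumes hub access; input texts are illustrative):
if __name__ == "__main__":
    tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    ids_a = tok.convert_tokens_to_ids(tok.tokenize("hello world"))
    ids_b = tok.convert_tokens_to_ids(tok.tokenize("goodbye"))
    print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))
    # -> [2] for <cls>, then 0s over segment A + <sep>, then 1s over segment B + <sep>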
| 146 | 1 |
def decimal_to_fraction(decimal) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce the fraction using the Euclidean algorithm for the GCD.
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 9 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    # e.g. n = 10: square of sum 3025 minus sum of squares 385 gives 2640.
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 | 0 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
UpperCamelCase_ = NewType("""DataClass""", Any)
UpperCamelCase_ = NewType("""DataClassType""", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *, aliases: Union[str, List[str]] = None, help: str = None, default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING, metadata: dict = None, **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important: don't use a dict as the default param in the function signature, it is mutable and shared across calls.
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """simple docstring"""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser, field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]
            kwargs["type"] = make_choice_type_function(kwargs["choices"])
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args, allow_extra_keys=False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file, allow_extra_keys=False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file, allow_extra_keys=False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
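
# Minimal driving sketch (field names are illustrative):
if __name__ == "__main__":
    @dataclasses.dataclass
    class _ExampleArgs:
        learning_rate: float = 5e-5
        fp16: bool = False  # bools default False: passing --fp16 flips them via the const=True path above

    _parser = HfArgumentParser(_ExampleArgs)
    (_cfg,) = _parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5", "--fp16"])
    print(_cfg.learning_rate, _cfg.fp16)  # 3e-05 True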
| 88 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse the edge list in `path` into {node: [[neighbour, distance], ...]}."""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour used as the starting solution."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    # The final hop was counted with the 10000 sentinel; swap in the real edge back to the start.
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """All 2-swap neighbours of `solution`, each with its total distance appended as the last element."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        n_index = solution.index(n)
        for kn in solution[1:-1]:
            kn_index = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[n_index] = kn
            _tmp[kn_index] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
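
# The input file is a whitespace-separated edge list, one edge per line:
#   node_a node_b distance
# Toy end-to-end run (single-character node names, since the start node is
# read with f.read(1); file name and contents are illustrative):
# with open("tabu_test.txt", "w") as f:
#     f.write("a b 20\na c 18\na d 22\nb c 10\nb d 11\nc d 23\n")
# neighbours = generate_neighbours("tabu_test.txt")
# first_solution, first_distance = generate_first_solution("tabu_test.txt", neighbours)
# print(tabu_search(first_solution, first_distance, neighbours, 3, 2))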
| 88 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
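
# Typical invocation (downloads DINO weights via torch.hub and verifies outputs;
# script and folder names are illustrative):
#   python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16_hf
# or equivalently, from Python:
#   convert_vit_checkpoint("dino_vitb16", "./dino_vitb16_hf", base_model=True)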
| 49 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ibert'''] = [
        '''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''IBertForMaskedLM''',
        '''IBertForMultipleChoice''',
        '''IBertForQuestionAnswering''',
        '''IBertForSequenceClassification''',
        '''IBertForTokenClassification''',
        '''IBertModel''',
        '''IBertPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
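
# The point of the pattern above: nothing under modeling_ibert is imported until
# an attribute is first accessed. Condensed illustration of the idea (not the
# real _LazyModule implementation):
# import importlib
# class _SketchLazyModule:
#     def __init__(self, name, import_structure):
#         self._name = name
#         self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
#     def __getattr__(self, attr):
#         module = importlib.import_module(f".{self._attr_to_module[attr]}", self._name)
#         return getattr(module, attr)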
| 167 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """simple docstring"""

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Tests if `x` is a torch/TF/JAX tensor or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert tensors, arrays and nested containers of them to plain Python lists/numbers."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert tensors, arrays and nested containers of them to numpy arrays."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """simple docstring"""

    def __post_init__(self):
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """simple docstring"""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}")


class PaddingStrategy(ExplicitEnum):
    """simple docstring"""

    LONGEST = '''longest'''
    MAX_LENGTH = '''max_length'''
    DO_NOT_PAD = '''do_not_pad'''


class TensorType(ExplicitEnum):
    """simple docstring"""

    PYTORCH = '''pt'''
    TENSORFLOW = '''tf'''
    NUMPY = '''np'''
    JAX = '''jax'''


class ContextManagers:
    """simple docstring"""

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single-level dict, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir=False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic transpose for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix every entry of an auto map with `repo_id`, unless the entry is already namespaced."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map
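

# Illustrative example (added): entries already containing "--" are treated as
# namespaced and left untouched.
# >>> add_model_info_to_auto_map({"AutoModel": "modeling.MyModel"}, "user/repo")
# {'AutoModel': 'user/repo--modeling.MyModel'}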
def infer_framework(model_class):
    """
    Infer the framework of a model class from its MRO, without relying on `isinstance`
    checks (the relevant base classes may not be importable in every environment).
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
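

# Illustrative example (added): a PyTorch model is recognised through
# `PreTrainedModel` (or `torch.nn.Module`) appearing in its MRO.
# >>> from transformers import BertModel
# >>> infer_framework(BertModel)
# 'pt'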
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """
    Utility class holding a conversation and its history: the unprocessed user input,
    the past user inputs, and the generated model responses.
    """

    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        """Add a user input to the conversation, optionally overwriting an unprocessed one."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the unprocessed user input into the history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response):
        """Append a generated response to the conversation."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Iterate over all `(is_user, text)` pairs of the conversation."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
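

# Illustrative example (added): typical lifecycle of a Conversation object.
# >>> conversation = Conversation("Going to the movies tonight - any suggestions?")
# >>> conversation.add_user_input("Is it an action movie?")  # warns: unprocessed input exists
# >>> conversation.mark_processed()
# >>> conversation.append_response("The Big Lebowski")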
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """Multi-turn conversational pipeline."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects a Conversation as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
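

# Illustrative usage sketch (added): the pipeline consumes and returns
# Conversation objects; `pipeline("conversational")` loads the task's default
# checkpoint, so no model name is assumed here.
# >>> from transformers import pipeline
# >>> chatbot = pipeline("conversational")
# >>> conv = chatbot(Conversation("Going to the movies tonight - any suggestions?"))
# >>> conv.generated_responses[-1]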
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
a__ : int = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
a__ : Union[str, Any] = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =CamembertTokenizer
_lowerCamelCase =CamembertTokenizerFast
_lowerCamelCase =True
_lowerCamelCase =True
def __snake_case ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = CamembertTokenizer(a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __snake_case ( self : int ):
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(a__ ) , 1004 )
def __snake_case ( self : List[str] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def __snake_case ( self : Dict ):
UpperCAmelCase = CamembertTokenizer(a__ )
tokenizer.save_pretrained(self.tmpdirname )
UpperCAmelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.encode(a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ )
UpperCAmelCase = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
def __snake_case ( self : List[Any] ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.tokenize(a__ )
UpperCAmelCase = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __snake_case ( self : Optional[Any] ):
# fmt: off
UpperCAmelCase = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
UpperCAmelCase = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=a__ , )
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst, k):
    """Return the k-th smallest element (1-indexed) of a list of distinct values."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
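

# Illustrative example (added): the 3rd smallest of a list of distinct values.
# >>> kth_number([2, 1, 3, 4, 5], 3)
# 3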
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class A_ :
"""simple docstring"""
def __init__( self : Union[str, Any] ,__A : Tuple ,__A : Dict=13 ,__A : Union[str, Any]=7 ,__A : Optional[Any]=True ,__A : Dict=True ,__A : List[str]=True ,__A : int=True ,__A : str=99 ,__A : Tuple=32 ,__A : List[Any]=2 ,__A : Dict=4 ,__A : List[str]=37 ,__A : Dict="gelu" ,__A : Optional[Any]=0.1 ,__A : Dict=0.1 ,__A : Dict=512 ,__A : Any=16 ,__A : Tuple=2 ,__A : List[Any]=0.02 ,__A : Any=3 ,__A : Tuple=4 ,__A : Dict=None ,__A : List[Any]=0 ,) -> Optional[Any]:
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_input_mask
_lowercase = use_token_type_ids
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = type_sequence_label_size
_lowercase = initializer_range
_lowercase = num_labels
_lowercase = num_choices
_lowercase = scope
_lowercase = projection_dim
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowercase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_lowercase = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase = None
if self.use_token_type_ids:
_lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowercase = None
_lowercase = None
_lowercase = None
if self.use_labels:
_lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowercase = ids_tensor([self.batch_size] ,self.num_choices )
_lowercase = BertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__A ,initializer_range=self.initializer_range ,)
_lowercase = DPRConfig(projection_dim=self.projection_dim ,**config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : List[Any] ,__A : Dict ,__A : str ,__A : Any ,__A : Dict ,__A : Tuple ,__A : Tuple ,__A : int ) -> List[str]:
_lowercase = TFDPRContextEncoder(config=__A )
_lowercase = model(__A ,attention_mask=__A ,token_type_ids=__A )
_lowercase = model(__A ,token_type_ids=__A )
_lowercase = model(__A )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )
def __UpperCAmelCase ( self : List[Any] ,__A : Optional[Any] ,__A : Tuple ,__A : Optional[Any] ,__A : Union[str, Any] ,__A : Tuple ,__A : Tuple ,__A : Any ) -> str:
_lowercase = TFDPRQuestionEncoder(config=__A )
_lowercase = model(__A ,attention_mask=__A ,token_type_ids=__A )
_lowercase = model(__A ,token_type_ids=__A )
_lowercase = model(__A )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )
def __UpperCAmelCase ( self : int ,__A : List[str] ,__A : Optional[int] ,__A : List[str] ,__A : str ,__A : str ,__A : int ,__A : int ) -> Optional[Any]:
_lowercase = TFDPRReader(config=__A )
_lowercase = model(__A ,attention_mask=__A )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape ,(self.batch_size,) )
def __UpperCAmelCase ( self : str ) -> str:
_lowercase = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) = config_and_inputs
_lowercase = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class A_ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
_lowercase = TFDPRModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,hidden_size=37 )
def __UpperCAmelCase ( self : Any ) -> List[str]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__A )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__A )
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__A )
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = TFDPRContextEncoder.from_pretrained(__A )
self.assertIsNotNone(__A )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = TFDPRContextEncoder.from_pretrained(__A )
self.assertIsNotNone(__A )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = TFDPRQuestionEncoder.from_pretrained(__A )
self.assertIsNotNone(__A )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = TFDPRReader.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_tf
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __UpperCAmelCase ( self : List[Any] ) -> int:
_lowercase = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
_lowercase = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
_lowercase = model(__A )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_lowercase = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration class for an MMBT model: wraps the configuration of the
    underlying text transformer and adds the multimodal settings on top."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
class MaxFenwickTree:
    """
    Fenwick (binary indexed) tree specialised for range-maximum queries.

    ``tree[i]`` caches the maximum of ``arr[get_prev(i) + 1 .. i]``. Point updates
    assume values only grow, the usual restriction for a max Fenwick tree.
    """

    def __init__(self, size):
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index):
        return index | (index + 1)

    @staticmethod
    def get_prev(index):
        return (index & (index + 1)) - 1

    def update(self, index, value):
        """Set ``arr[index] = value`` and propagate the new maximum upwards."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers only this position, so its cached max is exact.
                self.tree[index] = value
            else:
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left, right):
        """Return the maximum of ``arr[left:right]`` (``right`` exclusive)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
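

# Illustrative example (added): point updates followed by range-maximum queries
# over the half-open interval [left, right).
# >>> tree = MaxFenwickTree(5)
# >>> tree.update(2, 7)
# >>> tree.update(4, 3)
# >>> tree.query(0, 5)
# 7
# >>> tree.query(3, 5)
# 3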
if __name__ == "__main__":
import doctest
doctest.testmod()
| 339 | """simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class a_ :
UpperCamelCase_ : Any = XGLMConfig
UpperCamelCase_ : int = {}
UpperCamelCase_ : Tuple = "gelu"
def __init__( self : Optional[int] , snake_case__ : Tuple , snake_case__ : List[str]=14 , snake_case__ : Union[str, Any]=7 , snake_case__ : Optional[Any]=True , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=99 , snake_case__ : Optional[Any]=32 , snake_case__ : List[Any]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : List[str]=37 , snake_case__ : Optional[Any]="gelu" , snake_case__ : str=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[Any]=512 , snake_case__ : List[Any]=0.02 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = ffn_dim
lowerCAmelCase__ = activation_function
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = None
lowerCAmelCase__ = 0
lowerCAmelCase__ = 2
lowerCAmelCase__ = 1
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = self.get_config()
lowerCAmelCase__ = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=snake_case__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=snake_case__ , )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) = config_and_inputs
lowerCAmelCase__ = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase_ : Union[str, Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase_ : Tuple = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : str = False
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = TFXGLMModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , n_embd=37 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
self.config_tester.run_common_tests()
@slow
def _SCREAMING_SNAKE_CASE ( self : int ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFXGLMModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _SCREAMING_SNAKE_CASE ( self : str ):
super().test_resize_token_embeddings()
@require_tf
class a_ ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Optional[int]=True ):
lowerCAmelCase__ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
lowerCAmelCase__ = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowerCAmelCase__ = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
lowerCAmelCase__ = model.generate(snake_case__ , do_sample=snake_case__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
lowerCAmelCase__ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
lowerCAmelCase__ = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
lowerCAmelCase__ = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
lowerCAmelCase__ = model.generate(snake_case__ , do_sample=snake_case__ , seed=[7, 0] )
lowerCAmelCase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case__ )
lowerCAmelCase__ = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
lowerCAmelCase__ = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
lowerCAmelCase__ = """left"""
# use different length sentences to test batching
lowerCAmelCase__ = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""tf""" , padding=snake_case__ )
lowerCAmelCase__ = inputs["""input_ids"""]
lowerCAmelCase__ = model.generate(input_ids=snake_case__ , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
lowerCAmelCase__ = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowerCAmelCase__ = model.generate(input_ids=snake_case__ , max_new_tokens=12 )
lowerCAmelCase__ = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowerCAmelCase__ = model.generate(input_ids=snake_case__ , max_new_tokens=12 )
lowerCAmelCase__ = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
lowerCAmelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ )
lowerCAmelCase__ = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ )
lowerCAmelCase__ = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
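

# Illustrative usage sketch (added): instantiating the default configuration and,
# hypothetically, building a model from it (`BitModel` lives elsewhere in the
# library and is named here only for illustration).
# >>> configuration = BitConfig()
# >>> from transformers import BitModel
# >>> model = BitModel(configuration)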
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __lowercase ( _A , _A ):
@register_to_config
def __init__( self : List[Any] , __lowerCamelCase : int = 1_28 , __lowerCamelCase : int = 2_56 , __lowerCamelCase : float = 2000.0 , __lowerCamelCase : int = 7_68 , __lowerCamelCase : int = 12 , __lowerCamelCase : int = 12 , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 20_48 , __lowerCamelCase : float = 0.1 , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowercase = nn.Sequential(
nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , )
lowercase = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
lowercase = False
lowercase = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
lowercase = nn.Dropout(p=__lowerCamelCase )
lowercase = nn.ModuleList()
for lyr_num in range(__lowerCamelCase ):
# FiLM conditional T5 decoder
lowercase = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
self.decoders.append(__lowerCamelCase )
lowercase = TaLayerNorm(__lowerCamelCase )
lowercase = nn.Dropout(p=__lowerCamelCase )
lowercase = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
def __a ( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> str:
'''simple docstring'''
lowercase = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __a ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Any ) -> List[Any]:
'''simple docstring'''
lowercase ,lowercase ,lowercase = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowercase = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
lowercase = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowercase = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowercase = torch.broadcast_to(
torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
lowercase = self.position_encoding(__lowerCamelCase )
lowercase = self.continuous_inputs_projection(__lowerCamelCase )
inputs += position_encodings
lowercase = self.dropout(__lowerCamelCase )
# decoder: No padding present.
lowercase = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowercase = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowercase = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
lowercase = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
lowercase = lyr(
__lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0]
lowercase = self.decoder_norm(__lowerCamelCase )
lowercase = self.post_dropout(__lowerCamelCase )
lowercase = self.spec_out(__lowerCamelCase )
return spec_out
class __lowercase ( nn.Module ):
def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=1E-6 ) -> List[str]:
'''simple docstring'''
super().__init__()
lowercase = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) )
def __a ( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : int=None , ) -> Any:
'''simple docstring'''
lowercase = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
lowercase = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
lowercase = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
lowercase = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class __lowercase ( nn.Module ):
def __init__( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Any ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase = TaLayerNorm(__lowerCamelCase )
lowercase = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
lowercase = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
lowercase = nn.Dropout(__lowerCamelCase )
def __a ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
lowercase = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
lowercase = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
lowercase = self.attention(__lowerCamelCase )
lowercase = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class __lowercase ( nn.Module ):
def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ) -> Any:
'''simple docstring'''
super().__init__()
lowercase = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
lowercase = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
lowercase = nn.Dropout(__lowerCamelCase )
def __a ( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=None , ) -> List[Any]:
'''simple docstring'''
lowercase = self.layer_norm(__lowerCamelCase )
lowercase = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
lowercase = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class __lowercase ( nn.Module ):
def __init__( self : Any , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
super().__init__()
lowercase = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
lowercase = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
lowercase = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
lowercase = nn.Dropout(__lowerCamelCase )
def __a ( self : str , __lowerCamelCase : Dict , __lowerCamelCase : str=None ) -> Optional[int]:
'''simple docstring'''
lowercase = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
lowercase = self.film(__lowerCamelCase , __lowerCamelCase )
lowercase = self.DenseReluDense(__lowerCamelCase )
lowercase = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class __lowercase ( nn.Module ):
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().__init__()
lowercase = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
lowercase = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
lowercase = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
lowercase = nn.Dropout(__lowerCamelCase )
lowercase = NewGELUActivation()
def __a ( self : Any , __lowerCamelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase = self.act(self.wi_a(__lowerCamelCase ) )
lowercase = self.wi_a(__lowerCamelCase )
lowercase = hidden_gelu * hidden_linear
lowercase = self.dropout(__lowerCamelCase )
lowercase = self.wo(__lowerCamelCase )
return hidden_states
class __lowercase ( nn.Module ):
def __init__( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=1E-6 ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowercase = nn.Parameter(torch.ones(__lowerCamelCase ) )
lowercase = eps
def __a ( self : Any , __lowerCamelCase : str ) -> Optional[Any]:
'''simple docstring'''
lowercase = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase )
lowercase = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowercase = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __lowercase ( nn.Module ):
def __a ( self : Dict , __lowerCamelCase : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) ))
class __lowercase ( nn.Module ):
def __init__( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowercase = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def __a ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = self.scale_bias(__lowerCamelCase )
lowercase ,lowercase = torch.chunk(__lowerCamelCase , 2 , -1 )
lowercase = x * (1 + scale) + shift
return x
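

# Note (added): the FiLM layer above implements feature-wise linear modulation:
# a conditioning embedding is projected to per-channel (scale, shift) pairs and
# applied as  x * (1 + scale) + shift.  A minimal standalone sketch of that
# operation, independent of the obfuscated names in this file:
#
#     import torch
#     x = torch.randn(1, 4, 8)             # (batch, seq, channels)
#     scale_shift = torch.randn(1, 1, 16)  # projection output, 2 * channels
#     scale, shift = torch.chunk(scale_shift, 2, dim=-1)
#     y = x * (1 + scale) + shift          # FiLM-conditioned features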
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    r"""
    Constructs a Speech2Text processor which wraps a feature extractor and a tokenizer
    into a single processor.
    """
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
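

# Illustrative usage sketch (added): the processor routes audio to the feature
# extractor and text to the tokenizer. The checkpoint name is a published
# Speech2Text model and is used here only as an example.
# >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# >>> inputs = processor(audio_array, sampling_rate=16_000, return_tensors="pt")
# >>> labels = processor(text="a transcription").input_ids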
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self , snake_case , snake_case=2 , snake_case=True , snake_case=False , snake_case=1_0 , snake_case=3 , snake_case=3_2 * 8 , snake_case=3_2 * 8 , snake_case=4 , snake_case=6_4 , ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] =parent
_UpperCAmelCase : Optional[int] =batch_size
_UpperCAmelCase : List[str] =is_training
_UpperCAmelCase : Union[str, Any] =use_auxiliary_loss
_UpperCAmelCase : Dict =num_queries
_UpperCAmelCase : Tuple =num_channels
_UpperCAmelCase : Optional[Any] =min_size
_UpperCAmelCase : Any =max_size
_UpperCAmelCase : Optional[int] =num_labels
_UpperCAmelCase : Optional[int] =hidden_dim
_UpperCAmelCase : Dict =hidden_dim
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : List[str] =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
snake_case)
_UpperCAmelCase : List[str] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case)
_UpperCAmelCase : int =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case) > 0.5
).float()
_UpperCAmelCase : Union[str, Any] =(torch.rand((self.batch_size, self.num_labels) , device=snake_case) > 0.5).long()
_UpperCAmelCase : Tuple =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : int =self.num_queries
_UpperCAmelCase : int =self.num_labels
_UpperCAmelCase : List[Any] =[1, 1, 1, 1]
_UpperCAmelCase : int =self.num_channels
_UpperCAmelCase : List[Any] =6_4
_UpperCAmelCase : Optional[Any] =1_2_8
_UpperCAmelCase : List[Any] =self.hidden_dim
_UpperCAmelCase : Dict =self.hidden_dim
_UpperCAmelCase : int =self.hidden_dim
return config
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] =self.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] ={'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowerCAmelCase ( self , snake_case , snake_case) -> str:
'''simple docstring'''
_UpperCAmelCase : int =output.encoder_hidden_states
_UpperCAmelCase : Dict =output.pixel_decoder_hidden_states
_UpperCAmelCase : List[Any] =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case) , len(config.backbone_config.depths))
self.parent.assertTrue(len(snake_case) , len(config.backbone_config.depths))
self.parent.assertTrue(len(snake_case) , config.decoder_layers)
def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case=False) -> Any:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : Tuple =MaskaFormerModel(config=snake_case)
model.to(snake_case)
model.eval()
_UpperCAmelCase : List[Any] =model(pixel_values=snake_case , pixel_mask=snake_case)
_UpperCAmelCase : Union[str, Any] =model(snake_case , output_hidden_states=snake_case)
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(snake_case , snake_case)
def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict =MaskaFormerForUniversalSegmentation(config=snake_case)
model.to(snake_case)
model.eval()
def comm_check_on_output(snake_case):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
_UpperCAmelCase : Optional[Any] =model(pixel_values=snake_case , pixel_mask=snake_case)
_UpperCAmelCase : Optional[Any] =model(snake_case)
comm_check_on_output(snake_case)
_UpperCAmelCase : str =model(
pixel_values=snake_case , pixel_mask=snake_case , mask_labels=snake_case , class_labels=snake_case)
comm_check_on_output(snake_case)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class __magic_name__ ( lowerCAmelCase ,lowerCAmelCase ,unittest.TestCase ):
UpperCAmelCase =(MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCAmelCase ={"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
UpperCAmelCase =False
UpperCAmelCase =False
UpperCAmelCase =False
UpperCAmelCase =False
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =MaskaFormerModelTester(self)
_UpperCAmelCase : Dict =ConfigTester(self , config_class=snake_case , has_text_modality=snake_case)
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case , **snake_case , output_hidden_states=snake_case)
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*snake_case)
@unittest.skip(reason='Mask2Former does not use inputs_embeds')
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method')
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model')
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings')
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
pass
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] =model_class(snake_case)
_UpperCAmelCase : Optional[int] =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : str =[*signature.parameters.keys()]
_UpperCAmelCase : Tuple =['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case)
@slow
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : Dict =MaskaFormerModel.from_pretrained(snake_case)
self.assertIsNotNone(snake_case)
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any =(self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] ={
'pixel_values': torch.randn((2, 3, *size) , device=snake_case),
'mask_labels': torch.randn((2, 1_0, *size) , device=snake_case),
'class_labels': torch.zeros(2 , 1_0 , device=snake_case).long(),
}
_UpperCAmelCase : List[str] =self.model_tester.get_config()
_UpperCAmelCase : Tuple =MaskaFormerForUniversalSegmentation(snake_case).to(snake_case)
_UpperCAmelCase : Any =model(**snake_case)
self.assertTrue(outputs.loss is not None)
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case , **snake_case , output_hidden_states=snake_case)
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] =model_class(snake_case).to(snake_case)
_UpperCAmelCase : Dict =model(**snake_case , output_attentions=snake_case)
self.assertTrue(outputs.attentions is not None)
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : List[str] =self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : List[str] =model_class(snake_case)
model.to(snake_case)
model.train()
_UpperCAmelCase : Tuple =model(snake_case , mask_labels=snake_case , class_labels=snake_case).loss
loss.backward()
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple =self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str =self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Tuple =True
_UpperCAmelCase : str =True
_UpperCAmelCase : Union[str, Any] =model_class(snake_case).to(snake_case)
model.train()
_UpperCAmelCase : Union[str, Any] =model(snake_case , mask_labels=snake_case , class_labels=snake_case)
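        # retain_grad() keeps gradients on these intermediate activations so they can be checked after backward()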
_UpperCAmelCase : Any =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Any =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : int =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : List[str] =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
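# absolute tolerance shared by the `allclose` checks in the slow integration tests below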
lowercase =1e-4
def lowerCamelCase__ ( ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple =MaskaFormerModel.from_pretrained(self.model_checkpoints).to(snake_case)
_UpperCAmelCase : List[str] =self.default_image_processor
_UpperCAmelCase : int =prepare_img()
_UpperCAmelCase : Any =image_processor(snake_case , return_tensors='pt').to(snake_case)
_UpperCAmelCase : Dict =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(snake_case , (1, 3, 3_8_4, 3_8_4))
with torch.no_grad():
_UpperCAmelCase : int =model(**snake_case)
_UpperCAmelCase : List[Any] =torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(snake_case)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case , atol=snake_case))
_UpperCAmelCase : Dict =torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(snake_case)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case , atol=snake_case))
_UpperCAmelCase : Optional[int] =torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(snake_case)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case , atol=snake_case))
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(snake_case).eval()
_UpperCAmelCase : str =self.default_image_processor
_UpperCAmelCase : Any =prepare_img()
_UpperCAmelCase : Dict =image_processor(snake_case , return_tensors='pt').to(snake_case)
_UpperCAmelCase : List[Any] =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(snake_case , (1, 3, 3_8_4, 3_8_4))
with torch.no_grad():
_UpperCAmelCase : Dict =model(**snake_case)
# masks_queries_logits
_UpperCAmelCase : Union[str, Any] =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
_UpperCAmelCase : str =[
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
_UpperCAmelCase : Union[str, Any] =torch.tensor(snake_case).to(snake_case)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case , atol=snake_case))
# class_queries_logits
_UpperCAmelCase : Any =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
_UpperCAmelCase : Optional[Any] =torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
]).to(snake_case)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case , atol=snake_case))
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Tuple =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(snake_case).eval()
_UpperCAmelCase : Optional[Any] =self.default_image_processor
_UpperCAmelCase : List[Any] =image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3)), np.zeros((3, 8_0_0, 1_3_3_3))] , segmentation_maps=[np.zeros((3_8_4, 3_8_4)).astype(np.floataa), np.zeros((3_8_4, 3_8_4)).astype(np.floataa)] , return_tensors='pt' , )
_UpperCAmelCase : int =inputs['pixel_values'].to(snake_case)
_UpperCAmelCase : Union[str, Any] =[el.to(snake_case) for el in inputs['mask_labels']]
_UpperCAmelCase : Tuple =[el.to(snake_case) for el in inputs['class_labels']]
with torch.no_grad():
_UpperCAmelCase : List[str] =model(**snake_case)
self.assertTrue(outputs.loss is not None)
| 446 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_lowerCAmelCase = """bert-base-cased"""
_lowerCAmelCase = """google/pegasus-xsum"""
_lowerCAmelCase = [""" Sam ate lunch today.""", """Sams lunch ingredients."""]
_lowerCAmelCase = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
_lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
_lowerCAmelCase = """sshleifer/bart-tiny-random"""
_lowerCAmelCase = """sshleifer/tiny-mbart"""
_lowerCAmelCase = """sshleifer/tiny-marian-en-de"""
def _dump_articles(path, articles):
    """Write the given articles to ``path``, one per line."""
    content = """\n""".join(articles)
    Path(path).open("""w""").writelines(content)
def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, F"""{split}.source"""), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, F"""{split}.target"""), SUMMARIES)
    return tmp_dir
class UpperCamelCase(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __snake_case ( self :Dict , __magic_name__ :int ) ->Dict:
lowercase : Any = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowercase : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowercase : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
lowercase : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
lowercase : Dict = 4
lowercase : Optional[Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowercase , lowercase : Any = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
lowercase : List[str] = SeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , )
lowercase : str = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowercase : Any = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __snake_case ( self :Optional[Any] , __magic_name__ :int ) ->List[Any]:
lowercase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowercase : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowercase : Any = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
lowercase : List[str] = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
lowercase : int = 4
lowercase : List[str] = LegacySeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=20 , max_target_length=UpperCamelCase__ , )
lowercase : int = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __snake_case ( self :List[str] ) ->Any:
lowercase : Any = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
lowercase : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowercase : int = tmp_dir.joinpath("""train.source""" ).open().readlines()
lowercase : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(UpperCamelCase__ , UpperCamelCase__ , 128 , UpperCamelCase__ )
lowercase : Dict = {x.name for x in tmp_dir.iterdir()}
lowercase : Optional[Any] = {x.name for x in save_dir.iterdir()}
lowercase : Union[str, Any] = save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(UpperCamelCase__ ) < len(UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 1
assert len(packed_examples[0] ) == sum(len(UpperCamelCase__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def __snake_case ( self :Optional[Any] ) ->Optional[Any]:
if not FAIRSEQ_AVAILABLE:
return
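        # the dynamic sampler packs variable-length examples so every batch stays under the max_tokens budget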
lowercase , lowercase , lowercase : Any = self._get_dataset(max_len=64 )
lowercase : List[str] = 64
lowercase : List[str] = ds.make_dynamic_sampler(UpperCamelCase__ , required_batch_size_multiple=UpperCamelCase__ )
lowercase : Tuple = [len(UpperCamelCase__ ) for x in batch_sampler]
assert len(set(UpperCamelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(UpperCamelCase__ ) == len(UpperCamelCase__ ) # no dropped or added examples
lowercase : Optional[int] = DataLoader(UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
lowercase : Any = []
lowercase : Optional[int] = []
for batch in data_loader:
lowercase : List[Any] = batch["""input_ids"""].shape
lowercase : Any = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowercase : Union[str, Any] = np.product(batch["""input_ids"""].shape )
num_src_per_batch.append(UpperCamelCase__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(UpperCamelCase__ )
assert num_src_per_batch[0] == max(UpperCamelCase__ )
if failures:
raise AssertionError(f"""too many tokens in {len(UpperCamelCase__ )} batches""" )
def __snake_case ( self :int ) ->Optional[Any]:
lowercase , lowercase , lowercase : Tuple = self._get_dataset(max_len=512 )
lowercase : List[str] = 2
lowercase : str = ds.make_sortish_sampler(UpperCamelCase__ , shuffle=UpperCamelCase__ )
lowercase : str = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
lowercase : Any = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=UpperCamelCase__ )
lowercase : str = tokenizer.pad_token_id
def count_pad_tokens(__magic_name__ :Tuple , __magic_name__ :Optional[Any]="input_ids" ):
return [batch[k].eq(UpperCamelCase__ ).sum().item() for batch in data_loader]
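        # sortish sampling groups similar lengths together, so it should produce less padding on both sides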
assert sum(count_pad_tokens(UpperCamelCase__ , k="""labels""" ) ) < sum(count_pad_tokens(UpperCamelCase__ , k="""labels""" ) )
assert sum(count_pad_tokens(UpperCamelCase__ ) ) < sum(count_pad_tokens(UpperCamelCase__ ) )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
def __snake_case ( self :int , __magic_name__ :List[Any]=1_000 , __magic_name__ :Union[str, Any]=128 ) ->Dict:
if os.getenv("""USE_REAL_DATA""" , UpperCamelCase__ ):
lowercase : List[str] = """examples/seq2seq/wmt_en_ro"""
lowercase : Dict = max_len * 2 * 64
if not Path(UpperCamelCase__ ).joinpath("""train.len""" ).exists():
save_len_file(UpperCamelCase__ , UpperCamelCase__ )
else:
lowercase : Optional[int] = """examples/seq2seq/test_data/wmt_en_ro"""
lowercase : Tuple = max_len * 4
save_len_file(UpperCamelCase__ , UpperCamelCase__ )
lowercase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowercase : Dict = SeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , n_obs=UpperCamelCase__ , )
return ds, max_tokens, tokenizer
def __snake_case ( self :List[Any] ) ->List[str]:
lowercase , lowercase , lowercase : Any = self._get_dataset()
lowercase : Optional[int] = set(DistributedSortishSampler(UpperCamelCase__ , 256 , num_replicas=2 , rank=0 , add_extra_examples=UpperCamelCase__ ) )
lowercase : Tuple = set(DistributedSortishSampler(UpperCamelCase__ , 256 , num_replicas=2 , rank=1 , add_extra_examples=UpperCamelCase__ ) )
assert idsa.intersection(UpperCamelCase__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __snake_case ( self :Union[str, Any] , __magic_name__ :str ) ->Optional[int]:
lowercase : str = AutoTokenizer.from_pretrained(UpperCamelCase__ , use_fast=UpperCamelCase__ )
if tok_name == MBART_TINY:
lowercase : Optional[Any] = SeqaSeqDataset(
UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
lowercase : str = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowercase : Tuple = SeqaSeqDataset(
UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
lowercase : str = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(UpperCamelCase__ ) == 1 if tok_name == BART_TINY else len(UpperCamelCase__ ) == 0
| 700 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
# constant name assumed from the upstream release script; only the value survived obfuscation
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    """Update the version string in ``fname`` using the regex registered for ``pattern``."""
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""", version)
    code = re_pattern.sub(replace, code)
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point the README model list at the stable doc URLs instead of `main`."""
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""",
                """https://huggingface.co/docs/transformers/model_doc""",
            )
        index += 1
    with open(README_FILE, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    with open(REPLACE_FILES["""init"""], """r""") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = F"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(F"""Which version are you releasing? [{default_version}]""")
    if len(version) == 0:
        version = default_version
    print(F"""Updating version to {version}.""")
    global_version_update(version, patch=patch)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F"""Which version are we developing now? [{dev_version}]""")
    if len(version) == 0:
        version = dev_version
    print(F"""Updating version to {version}.""")
    global_version_update(version)
    print("""Cleaning main README, don't forget to run `make fix-copies`.""")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 348 | 0 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False) -> None:
    """simple docstring"""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    # NOTE: the boolean flags below are restored upstream defaults; the obfuscated source did not preserve them
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """simple docstring"""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = '''cuda'''
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''')
    else:
        device = '''cpu'''
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '''/vae''')
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / '''vae_decoder''' / '''model.onnx''', ordered_input_names=['''latent_sample''', '''return_dict'''], output_names=['''sample'''], dynamic_axes={
            '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        }, opset=opset)
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
| 14 |
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the live counters from worldometers and return them keyed by their headings."""
    soup = BeautifulSoup(requests.get(url).text, '''html.parser''')
    keys = soup.findAll('''h1''')
    values = soup.findAll('''div''', {'''class''': '''maincounter-number'''})
    keys += soup.findAll('''span''', {'''class''': '''panel-title'''})
    values += soup.findAll('''div''', {'''class''': '''number-table-main'''})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
| 234 | 0 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _SCREAMING_SNAKE_CASE(TestCasePlus):
@slow
@require_torch
def snake_case__ ( self : List[Any] ):
__magic_name__ = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
__magic_name__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__magic_name__ = bertabert.config.encoder.vocab_size
__magic_name__ = tokenizer.sep_token_id
__magic_name__ = tokenizer.cls_token_id
__magic_name__ = 128
__magic_name__ = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
__magic_name__ = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
__magic_name__ = train_dataset.select(range(32 ) )
__magic_name__ = val_dataset.select(range(16 ) )
__magic_name__ = 4
def _map_to_encoder_decoder_inputs(a__ : Optional[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__magic_name__ = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=a__ , max_length=512 )
__magic_name__ = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=a__ , max_length=128 )
__magic_name__ = inputs.input_ids
__magic_name__ = inputs.attention_mask
__magic_name__ = outputs.input_ids
__magic_name__ = outputs.input_ids.copy()
__magic_name__ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
__magic_name__ = outputs.attention_mask
assert all(len(a__ ) == 512 for x in inputs.input_ids )
assert all(len(a__ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(a__ : Union[str, Any] ):
__magic_name__ = pred.label_ids
__magic_name__ = pred.predictions
# all unnecessary tokens are removed
__magic_name__ = tokenizer.batch_decode(a__ , skip_special_tokens=a__ )
__magic_name__ = tokenizer.batch_decode(a__ , skip_special_tokens=a__ )
__magic_name__ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(a__ ) )] ) / len(a__ )
return {"accuracy": accuracy}
# map train dataset
__magic_name__ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=a__ , batch_size=a__ , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
__magic_name__ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=a__ , batch_size=a__ , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
__magic_name__ = self.get_auto_remove_tmp_dir()
__magic_name__ = SeqaSeqTrainingArguments(
output_dir=a__ , per_device_train_batch_size=a__ , per_device_eval_batch_size=a__ , predict_with_generate=a__ , evaluation_strategy='''steps''' , do_train=a__ , do_eval=a__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__magic_name__ = SeqaSeqTrainer(
model=a__ , args=a__ , compute_metrics=_compute_metrics , train_dataset=a__ , eval_dataset=a__ , tokenizer=a__ , )
# start training
trainer.train()
| 245 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    '''simple docstring'''
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
def snake_case__ ( self : List[Any] , a__ : Optional[int] , a__ : List[str] ):
pass
def snake_case__ ( self : Dict ):
pass
def snake_case__ ( self : Optional[int] ):
pass
def snake_case__ ( self : List[Any] , a__ : List[Any] , a__ : List[Any] , a__ : int , a__ : Dict , a__ : str=None , **a__ : Dict ):
__magic_name__ = VisionTextDualEncoderConfig.from_vision_text_configs(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case__ ( self : Any , a__ : Tuple , a__ : List[Any] , a__ : Union[str, Any] , a__ : Any , a__ : Dict=None , **a__ : Optional[Any] ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self : str , a__ : Tuple , a__ : Optional[Any] , a__ : int , a__ : Optional[int] , a__ : int=None , **a__ : int ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = {'''vision_model''': vision_model, '''text_model''': text_model}
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self : str , a__ : List[Any] , a__ : Optional[int] , a__ : int , a__ : Union[str, Any] , a__ : Tuple=None , **a__ : Union[str, Any] ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
__magic_name__ = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ )
__magic_name__ = TFVisionTextDualEncoderModel.from_pretrained(a__ )
__magic_name__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
__magic_name__ = after_output[0].numpy()
__magic_name__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1E-5 )
def snake_case__ ( self : str , a__ : Any , a__ : Optional[int] , a__ : List[str] , a__ : Tuple , a__ : Optional[Any]=None , **a__ : int ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(
input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__ )
__magic_name__ = output.vision_model_output.attentions
self.assertEqual(len(a__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ = to_atuple(vision_model.config.image_size )
__magic_name__ = to_atuple(vision_model.config.patch_size )
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__magic_name__ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__magic_name__ = output.text_model_output.attentions
self.assertEqual(len(a__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case__ ( self : Optional[Any] , a__ : np.ndarray , a__ : np.ndarray , a__ : float ):
__magic_name__ = np.abs((a - b) ).max()
self.assertLessEqual(a__ , a__ , F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def snake_case__ ( self : Tuple ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a__ )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a__ )
def snake_case__ ( self : int ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a__ )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_save_load(**a__ )
def snake_case__ ( self : Tuple ):
__magic_name__ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a__ )
@slow
def snake_case__ ( self : int ):
__magic_name__ , __magic_name__ = self.get_pretrained_model_and_inputs()
__magic_name__ = model_a(**a__ )
__magic_name__ = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a__ )
__magic_name__ = TFVisionTextDualEncoderModel.from_pretrained(a__ )
__magic_name__ = model_a(**a__ )
__magic_name__ = after_outputs[0].numpy()
__magic_name__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1E-5 )
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def snake_case__ ( self : int ):
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' )
__magic_name__ = 13
__magic_name__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ = random_attention_mask([batch_size, 4] )
__magic_name__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name='''vision_model''')
        text_model = TFBertModel(text_config, name='''text_model''')
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def snake_case__ ( self : Tuple ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
__magic_name__ = 13
__magic_name__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ = random_attention_mask([batch_size, 4] )
__magic_name__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self : Dict , a__ : Any , a__ : Tuple , a__ : str , a__ : Any , a__ : Union[str, Any]=None , **a__ : List[str] ):
__magic_name__ , __magic_name__ = self.get_vision_text_model(a__ , a__ )
__magic_name__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__magic_name__ = model(
input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__ )
__magic_name__ = output.vision_model_output.attentions
self.assertEqual(len(a__ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__magic_name__ = to_atuple(vision_model.config.image_size )
__magic_name__ = to_atuple(vision_model.config.patch_size )
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__magic_name__ = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__magic_name__ = output.text_model_output.attentions
self.assertEqual(len(a__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name='''vision_model''')
        text_model = TFRobertaModel(text_config, name='''text_model''')
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def snake_case__ ( self : List[str] ):
__magic_name__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' )
__magic_name__ = 13
__magic_name__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ = random_attention_mask([batch_size, 4] )
__magic_name__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name='''vision_model''')
        text_model = TFBertModel(text_config, name='''text_model''')
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=a__ )
__magic_name__ = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
__magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__magic_name__ = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=a__ , padding=a__ , return_tensors='''np''' )
__magic_name__ = model(**a__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__magic_name__ = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a__ , atol=1E-3 ) )
| 245 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case(metaclass=DummyObject):
"""simple docstring"""
_lowerCamelCase = ["""note_seq"""]
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ['''note_seq'''] )
@classmethod
def UpperCamelCase__( cls , *__lowerCamelCase , **__lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ['''note_seq'''] )
@classmethod
def UpperCamelCase__( cls , *__lowerCamelCase , **__lowerCamelCase ):
'''simple docstring'''
requires_backends(cls , ['''note_seq'''] )
| 177 |
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort ``unsorted`` in place with bidirectional bubble passes and return it."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
| 177 | 1 |
'''simple docstring'''
class Node:
    '''simple docstring'''

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return F"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    '''simple docstring'''

    # NOTE: peek/remove/insert/is_empty are the conventional names for these operations;
    # the obfuscated source did not preserve them, unlike decrease_key (confirmed by the demo below)
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("""R""", -1)
b = Node("""B""", 6)
a = Node("""A""", 3)
x = Node("""X""", 1)
e = Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
'''simple docstring'''
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True  # attribute name assumed; only the value survived obfuscation
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
torch.manual_seed(0 )
lowercase__ : int = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=__lowerCAmelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=__lowerCAmelCase , only_cross_attention=__lowerCAmelCase , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
lowercase__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
lowercase__ : Optional[Any] = EulerDiscreteScheduler(prediction_type='''sample''' )
lowercase__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''quick_gelu''' , projection_dim=512 , )
lowercase__ : Optional[Any] = CLIPTextModel(__lowerCAmelCase )
lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : List[Any] = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': self.dummy_image.cpu(),
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : Tuple = '''cpu'''
lowercase__ : List[Any] = self.get_dummy_components()
lowercase__ : Optional[Any] = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : List[Any] = self.get_dummy_inputs(__lowerCAmelCase )
lowercase__ : Any = pipe(**__lowerCAmelCase ).images
lowercase__ : str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
lowercase__ : int = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
lowercase__ : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7E-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3E-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7E-3)
    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3)
    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3E-3)
    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3E-3)
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Optional[int] = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
lowercase__ : List[str] = self.get_dummy_components()
lowercase__ : Dict = self.pipeline_class(**__lowerCAmelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(__lowerCAmelCase )
lowercase__ : List[str] = 2
lowercase__ : Dict = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
lowercase__ : Union[str, Any] = getattr(__lowerCAmelCase , scheduler_enum.name )
lowercase__ : Optional[int] = scheduler_cls.from_config(pipe.scheduler.config )
lowercase__ : Dict = pipe(**__lowerCAmelCase )[0]
outputs.append(__lowerCAmelCase )
assert check_same_shape(__lowerCAmelCase )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _lowerCAmelCase( self ) -> int:
lowercase__ : str = torch.manual_seed(33 )
lowercase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
lowercase__ : str = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
lowercase__ : Optional[int] = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
lowercase__ : Any = pipe(__lowerCAmelCase , generator=__lowerCAmelCase , output_type='''latent''' ).images
lowercase__ : Optional[int] = upscaler(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=__lowerCAmelCase , output_type='''np''' , ).images[0]
lowercase__ : str = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : List[str] = torch.manual_seed(33 )
lowercase__ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
lowercase__ : int = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
lowercase__ : Union[str, Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
lowercase__ : List[Any] = upscaler(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=__lowerCAmelCase , output_type='''np''' , ).images[0]
lowercase__ : str = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-2
| 428 | 0 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return F"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
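
    # Sanity sketch (addition, not in the original file): cross-check
    # query_range against a brute-force reduce over the same slice.
    from functools import reduce

    sample = [2, 1, 5, 3, 4]
    for fn in (operator.add, max, min):
        tree = SegmentTree(sample, fn)
        assert tree.query_range(1, 3) == reduce(fn, sample[1:4])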
| 77 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Recursively checks both ends of a shrinking window for the key.
    Returns the index of the key, or -1 when it is absent.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
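

# Sanity sketch (addition): the scan finds a key from either shrinking end.
assert search([1, 3, 5, 7, 9], 5) == 2
assert search([1, 3, 5, 7, 9], 4) == -1
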
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(number: int) -> bool:
    candidate = str(number)
    return len(candidate) == 9 and set(candidate) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 10_0002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 100_2003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
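    # Sanity sketch (addition): the returned candidate must itself be pandigital.
    assert is_9_pandigital(solution())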
| 248 |
"""simple docstring"""
__magic_name__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__magic_name__ = [{"type": "code", "content": INSTALL_CONTENT}]
__magic_name__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 248 | 1 |
from ..utils import DummyObject, requires_backends
class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextProcessor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 303 |
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
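
# Sanity sketch (addition): encrypt_string/decrypt_string undo each other for
# any key, since XOR-ing twice with the same byte restores the input.
_cipher = XORCipher(key=67)
_message = "hallo welt"
assert _cipher.decrypt_string(_cipher.encrypt_string(_message, 67), 67) == _message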
| 535 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : str = {"vocab_file": "spiece.model"}
UpperCamelCase : List[str] = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
UpperCamelCase : Optional[Any] = {
"albert-base-v1": 5_1_2,
"albert-large-v1": 5_1_2,
"albert-xlarge-v1": 5_1_2,
"albert-xxlarge-v1": 5_1_2,
"albert-base-v2": 5_1_2,
"albert-large-v2": 5_1_2,
"albert-xlarge-v2": 5_1_2,
"albert-xxlarge-v2": 5_1_2,
}
UpperCamelCase : List[Any] = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = (
AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else mask_token
)
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
__UpperCamelCase = do_lower_case
__UpperCamelCase = remove_space
__UpperCamelCase = keep_accents
__UpperCamelCase = vocab_file
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
return state
def __setstate__( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase = {}
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if self.remove_space:
__UpperCamelCase = ' '.join(inputs.strip().split() )
else:
__UpperCamelCase = inputs
__UpperCamelCase = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
__UpperCamelCase = unicodedata.normalize('NFKD' , __UpperCAmelCase )
__UpperCamelCase = ''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
__UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.preprocess_text(__UpperCAmelCase )
__UpperCamelCase = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
__UpperCamelCase = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
__UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__UpperCamelCase = cur_pieces[1:]
else:
__UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = ''
__UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
__UpperCamelCase = True
__UpperCamelCase = []
else:
current_sub_tokens.append(__UpperCAmelCase )
__UpperCamelCase = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , 'wb' ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
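

if __name__ == "__main__":
    # A hedged usage sketch (addition; assumes Hub access and an installed
    # `transformers`): the preprocessing described above, lowercasing plus
    # accent stripping, is visible through the public tokenizer API.
    from transformers import AlbertTokenizer as _AlbertTokenizer

    _tok = _AlbertTokenizer.from_pretrained("albert-base-v2")
    print(_tok.tokenize("Héllo, WORLD!"))  # accents dropped, text lowercased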
| 293 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
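

def _offline_smoke_test() -> None:
    # Addition: a hedged offline sketch using unittest.mock; no network access
    # is needed, and the token below is a stand-in, not a real credential.
    from unittest import mock

    with mock.patch("requests.get") as fake_get:
        fake_get.return_value.json.return_value = {"login": "octocat"}
        assert fetch_github_info("hypothetical-token")["login"] == "octocat"
        _, kwargs = fake_get.call_args
        assert kwargs["headers"]["Authorization"] == "token hypothetical-token"
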
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 293 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16_000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs, ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform):
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0, ):
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_tensors=None, sampling_rate=None, return_attention_mask=None, **kwargs, ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs | 271 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    token = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') )
return token
def final():
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_84,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_A : int = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
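

# Sanity sketch (addition): the depth-selection rule used in
# convert_cvt_checkpoint above, restated as a standalone helper.
def _depths_for(model_name: str) -> list:
    tag = model_name.rsplit("/", 1)[-1][4:6]
    return {"13": [1, 2, 10], "21": [1, 4, 16]}.get(tag, [2, 2, 20])


assert _depths_for("cvt-13") == [1, 2, 10]
assert _depths_for("cvt-21") == [1, 4, 16]
assert _depths_for("cvt-w24") == [2, 2, 20]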
| 100 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs, ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
@property
    def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul , self.conv_stride , 1 )
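

# Sanity sketch (addition): the property above multiplies the feature-encoder
# strides; with the default (5, 2, 2, 2, 2, 2, 2) one logit covers 320 samples.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320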
| 713 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the
        # **non-padding** token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
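

# Standalone sketch (addition) of the trimming rule in _decode_audio above:
# samples whose padded mask equals the padding value are dropped per example.
_audio = np.arange(12, dtype=np.float32).reshape(1, 1, 12)  # (batch, channels, seq)
_mask = np.array([[1] * 9 + [0] * 3])  # assume padding_value == 0; last 3 padded
_trimmed = np.asarray(_audio.tolist()[0])[_mask[0][None, :] != 0]
assert _trimmed.reshape(1, -1).shape == (1, 9)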
| 73 | 0 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate alphanumeric fraction of file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
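
# Sanity sketch (addition): the whitespace-insensitive MD5 used for exact
# dedup maps reformatted-but-identical files to the same hash.
assert (
    get_hash({"content": "def f():\n    pass"})["hash"]
    == get_hash({"content": "def f():\n\tpass"})["hash"]
)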
| 616 |
"""simple docstring"""
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean greatest common divisor."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Returns the smallest number evenly divisible by 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f"""{solution() = }""")
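    # Sanity sketch (addition): the smallest number divisible by 1..10 is 2520.
    assert solution(10) == 2520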
| 616 | 1 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes among the gaps between consecutive cubes below max_prime."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
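    # Sanity sketch (addition): consecutive-cube gaps (n + 1)**3 - n**3 give
    # 7, 19, 37, 61, 91, ...; the first four below 100 are prime, 91 = 7 * 13 is not.
    assert solution(100) == 4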
| 452 | def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int, ) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build each row from the previous one in a single pass, exploiting symmetry."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        num_elems = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(num_elems, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Benchmark both implementations over increasing row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
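    # Sanity sketch (addition): both construction strategies must agree row for row.
    assert generate_pascal_triangle(6) == generate_pascal_triangle_optimized(6)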
| 452 | 1 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculation of the Easter date for the given year, using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
        tense = "will be" if year > datetime.now().year else "was"
print(F'Easter in {year} {tense} {gauss_easter(year)}')
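
    # Sanity sketch (addition): a well-known reference, Easter 2000 fell on April 23.
    assert gauss_easter(2_000) == datetime(2_000, 4, 23)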
| 130 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__A : Any = logging.get_logger(__name__)
class A_ (a_ ):
def __init__( self , _A=None , **_A ):
'''simple docstring'''
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , _A , )
super().__init__(args=_A , **_A )
| 130 | 1 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed( self ):
lowerCamelCase__ : Any = ModelForTest()
lowerCamelCase__ : Any = torch.randn(2 , 3 )
lowerCamelCase__ : Any = test_model(x + 1 )
lowerCamelCase__ : int = test_model(x + 2 )
lowerCamelCase__ : List[str] = PreForwardHook()
add_hook_to_module(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[int] = test_model(UpperCAmelCase )
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase__ : Dict = PreForwardHook()
add_hook_to_module(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = test_model(UpperCAmelCase )
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase__ : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : str = test_model(UpperCAmelCase )
assert torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 )
    def test_post_forward_hook_is_executed( self ):
lowerCamelCase__ : int = ModelForTest()
lowerCamelCase__ : Tuple = torch.randn(2 , 3 )
lowerCamelCase__ : Tuple = test_model(UpperCAmelCase )
lowerCamelCase__ : str = PostForwardHook()
add_hook_to_module(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = test_model(UpperCAmelCase )
self.assertTrue(torch.allclose(UpperCAmelCase , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase__ : Tuple = PostForwardHook()
add_hook_to_module(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = test_model(UpperCAmelCase )
self.assertTrue(torch.allclose(UpperCAmelCase , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase__ : List[str] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = test_model(UpperCAmelCase )
assert torch.allclose(UpperCAmelCase , output + 2 , atol=1e-5 )
    def test_no_grad_in_hook( self ):
lowerCamelCase__ : str = ModelForTest()
lowerCamelCase__ : Tuple = torch.randn(2 , 3 )
lowerCamelCase__ : int = test_model(UpperCAmelCase )
lowerCamelCase__ : Dict = PostForwardHook()
add_hook_to_module(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : int = test_model(UpperCAmelCase )
self.assertTrue(torch.allclose(UpperCAmelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : Optional[Any] = test_model(UpperCAmelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
    def test_align_devices_as_model_parallelism( self ):
lowerCamelCase__ : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase__ : Any = torch.randn(2 , 3 )
lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(UpperCAmelCase , AlignDevicesHook(io_same_device=UpperCAmelCase ) )
lowerCamelCase__ : List[str] = torch.randn(2 , 3 ).to(0 )
lowerCamelCase__ : Tuple = model(UpperCAmelCase )
self.assertEqual(output.device , torch.device(0 ) )
    def test_align_devices_as_cpu_offload( self ):
lowerCamelCase__ : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase__ : str = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase )
lowerCamelCase__ : int = torch.randn(2 , 3 )
lowerCamelCase__ : str = model(UpperCAmelCase )
self.assertEqual(output.device , UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
lowerCamelCase__ : int = torch.randn(2 , 3 )
lowerCamelCase__ : Tuple = model(UpperCAmelCase )
self.assertEqual(output.device , UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
    def test_attach_align_device_hook_as_cpu_offload( self ):
lowerCamelCase__ : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
lowerCamelCase__ : Any = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(UpperCAmelCase , execution_device=UpperCAmelCase , offload=UpperCAmelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase__ : Optional[int] = torch.device(UpperCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase )
lowerCamelCase__ : Any = torch.randn(2 , 3 )
lowerCamelCase__ : Dict = model(UpperCAmelCase )
self.assertEqual(output.device , UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(UpperCAmelCase , execution_device=UpperCAmelCase , offload=UpperCAmelCase , offload_buffers=UpperCAmelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
lowerCamelCase__ : Tuple = torch.randn(2 , 3 )
lowerCamelCase__ : Any = model(UpperCAmelCase )
self.assertEqual(output.device , UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map( self ):
lowerCamelCase__ : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
lowerCamelCase__ : Union[str, Any] = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
UpperCAmelCase , execution_device=UpperCAmelCase , offload=UpperCAmelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase__ : str = torch.device(UpperCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase )
lowerCamelCase__ : Any = torch.randn(2 , 3 )
lowerCamelCase__ : str = model(UpperCAmelCase )
self.assertEqual(output.device , UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
UpperCAmelCase , execution_device=UpperCAmelCase , offload=UpperCAmelCase , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
lowerCamelCase__ : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase__ : Dict = model(UpperCAmelCase )
self.assertEqual(output.device , UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
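# A self-contained sketch of the offload behavior exercised by the tests above
# (assumes `accelerate` is installed; illustrative only, not part of the suite):
import torch
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module

model = nn.Linear(3, 4)
add_hook_to_module(model, AlignDevicesHook(execution_device="cpu", offload=True))
print(model.weight.device)      # meta: parameters are offloaded between forward calls
out = model(torch.randn(2, 3))  # weights are materialized on the execution device per call
remove_hook_from_module(model)  # detaching the hook restores the weights onto the module
print(model.weight.device)      # cpu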
| 704 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_UpperCAmelCase : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="""cifar10""", metadata={"""help""": """Name of a dataset from the datasets package"""} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    image_column_name: Optional[str] = field(
        default=None, metadata={"""help""": """The column name of the images in the files."""} )
    train_dir: Optional[str] = field(default=None, metadata={"""help""": """A folder containing the training data."""} )
    validation_dir: Optional[str] = field(default=None, metadata={"""help""": """A folder containing the validation data."""} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"""help""": """Percent to split off of train for validation."""} )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        }, )

    def __post_init__( self ):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['val'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={
            """help""": (
                """The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."""
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        }, )
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
    model_revision: str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
    image_processor_name: str = field(default=None, metadata={"""help""": """Name or path of preprocessor config."""} )
    use_auth_token: bool = field(
        default=False, metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        }, )
    mask_ratio: float = field(
        default=0.75, metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
    norm_pix_loss: bool = field(
        default=True, metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class CustomTrainingArguments(TrainingArguments ):
    base_learning_rate: float = field(
        default=1E-3, metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def collate_fn(examples ) -> dict:
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    return {"pixel_values": pixel_values}
def main() -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split )
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowerCamelCase__ : List[Any] = image_processor.size['shortest_edge']
else:
lowerCamelCase__ : Optional[int] = (image_processor.size['height'], image_processor.size['width'])
lowerCamelCase__ : Optional[Any] = Compose(
[
Lambda(lambda _UpperCAmelCase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_UpperCAmelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_UpperCAmelCase ):
lowerCamelCase__ : Tuple = [transforms(_UpperCAmelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_UpperCAmelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
# Set the validation transforms
ds["validation"].set_transform(_UpperCAmelCase )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ) -> None:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
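# Worked example (illustrative numbers) of the linear learning-rate scaling rule
# applied in main() above: absolute_lr = base_lr * total_train_batch_size / 256.
base_learning_rate = 1e-3
per_device_batch_size = 64
gradient_accumulation_steps = 2
world_size = 2
total_train_batch_size = per_device_batch_size * gradient_accumulation_steps * world_size  # 256
learning_rate = base_learning_rate * total_train_batch_size / 256  # 1e-3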
| 188 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 395 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
snake_case__ = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class TestCodeExamples( unittest.TestCase):
    def analyze_directory( self , directory: Path , identifier: Union[str, None] = None , n_identifier: Union[List[str], None] = None , ignore_files: Union[str, List[str], None] = None , only_modules: bool = True , ) -> Any:
        """simple docstring"""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing" , file )
            if only_modules:
                module_identifier = file.split("." )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(F'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_doctest( self ) -> List[Any]:
        """simple docstring"""
        directory = Path("src/transformers" )
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def test_tokenization_doctest( self ) -> List[Any]:
        """simple docstring"""
        directory = Path("src/transformers" )
        identifier = "tokenization"
        self.analyze_directory(directory , identifier=identifier )
    def test_configuration_doctest( self ) -> Union[str, Any]:
        """simple docstring"""
        directory = Path("src/transformers" )
        identifier = "configuration"
        self.analyze_directory(directory , identifier=identifier )
    def test_remaining_doctest( self ) -> int:
        """simple docstring"""
        directory = Path("src/transformers" )
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def test_doc_sources( self ) -> Optional[int]:
        """simple docstring"""
        directory = Path("docs/source" )
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
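# Minimal illustration (sketch) of running doctests the way the harness above does:
import doctest

def add(a, b):
    """
    >>> add(2, 3)
    5
    """
    return a + b

result = doctest.testmod(verbose=False)  # collect and run the doctests in this module
assert result.failed == 0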
| 395 | 1 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__a : Optional[int] = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
__a : Optional[Any] = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
__a : Union[str, Any] = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
__a : Any = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
__a : Dict = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class CodeEval( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 1_00] , num_workers=4 , timeout=3.0 ) -> Dict:
        '''simple docstring'''
        if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError('''This metric is currently not supported on Windows.''' )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + '''\n''' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result['''completion_id'''], result) )
        total , correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['''passed'''] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {F"pass@{k}": estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
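# Worked example (sketch) of the unbiased pass@k estimator defined below:
# pass@k = 1 - C(n - c, k) / C(n, k) for n samples with c of them correct.
# With n=4 samples and c=2 correct: pass@1 = 0.5, pass@2 = 1 - C(2,2)/C(4,2) = 5/6.
#
#   estimate_pass_at_k(np.array([4]), np.array([2]), 1)  # -> array([0.5])
#   estimate_pass_at_k(np.array([4]), np.array([2]), 2)  # -> array([0.8333...])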
def estimate_pass_at_k( num_samples , num_correct , k ):
    """simple docstring"""
    def estimator(n , c , k ) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
| 522 |
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
def digit_factorial_sum( number ):
    """simple docstring"""
    if not isinstance(number , int ):
        raise TypeError('''Parameter number must be int''' )
    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''' )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution( chain_length = 60 , number_limit = 1000000 ):
    """simple docstring"""
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('''Parameters chain_length and number_limit must be int''' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
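# Worked example: the classic chain starting at 169 has length 3 and then repeats:
#   169 -> 1!+6!+9! = 363601 -> 3!+6!+3!+6!+0!+1! = 1454 -> 1!+4!+5!+4! = 169
assert digit_factorial_sum(169) == 363601
assert digit_factorial_sum(363601) == 1454
assert digit_factorial_sum(1454) == 169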
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(F'''{solution()}''')
| 522 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ) -> str:
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ) -> List[str]:
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None
    def setUp( self ) -> Tuple:
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> Optional[int]:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> List[str]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'crop_size' ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> Optional[Any]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 20} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
    def test_batch_feature( self ) -> List[str]:
        pass
    def test_call_pil( self ) -> Dict:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_numpy( self ) -> List[str]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_pytorch( self ) -> int:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
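# Minimal usage sketch of the processor under test (assumes `transformers` and PIL
# are installed; the class is exported as MobileNetV1ImageProcessor upstream):
from PIL import Image
import numpy as np
from transformers import MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.uint8(np.random.rand(32, 32, 3) * 255))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])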
| 383 | """simple docstring"""
def stooge_sort(arr ):
    stooge(arr , 0 , len(arr ) - 1 )
    return arr
def stooge(arr , i , h ):
    if i >= h:
        return
    # If the first element is larger than the last then swap them
    if arr[i] > arr[h]:
        arr[i] , arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3 )
        # Recursively sort first 2/3 elements
        stooge(arr , i , (h - t) )
        # Recursively sort last 2/3 elements
        stooge(arr , i + t , (h) )
        # Recursively sort first 2/3 elements
        stooge(arr , i , (h - t) )
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
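# Quick sanity checks (illustrative); stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71).
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]
assert stooge_sort([]) == []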
| 425 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig( PretrainedConfig ):
    model_type = """beit"""
    def __init__( self ,vocab_size=8_192 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.0_2 ,layer_norm_eps=1e-12 ,image_size=224 ,patch_size=16 ,num_channels=3 ,use_mask_token=False ,use_absolute_position_embeddings=False ,use_relative_position_bias=False ,use_shared_relative_position_bias=False ,layer_scale_init_value=0.1 ,drop_path_rate=0.1 ,use_mean_pooling=True ,out_indices=[3, 5, 7, 11] ,pool_scales=[1, 2, 3, 6] ,use_auxiliary_head=True ,auxiliary_loss_weight=0.4 ,auxiliary_channels=256 ,auxiliary_num_convs=1 ,auxiliary_concat_input=False ,semantic_loss_ignore_index=255 ,**kwargs ,):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1e-4
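# Minimal usage sketch (assumes `transformers` is installed):
config = BeitConfig(image_size=384, num_hidden_layers=6)
print(config.image_size, config.hidden_size)  # 384 768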
| 194 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack( value , weight , capacity ):
    '''simple docstring'''
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
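# Worked example (sketch): values [60, 100, 120], weights [10, 20, 30], capacity 50.
# Ratios are 6, 5, 4, so items 0 and 1 are taken whole plus 2/3 of item 2:
# 60 + 100 + 120 * 2/3 = 240.
max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert max_value == 240.0
assert fractions == [1, 1, 2 / 3]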
| 194 | 1 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    @property
    def gpu_provider( self ) -> Any:
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ) -> str:
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ) -> Tuple:
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting' ,revision='onnx' ,safety_checker=None ,feature_extractor=None ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt ,image=init_image ,mask_image=mask_image ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=generator ,output_type='np' ,)
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_k_lms( self ) -> Optional[Any]:
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-inpainting' ,subfolder='scheduler' ,revision='onnx' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting' ,revision='onnx' ,scheduler=lms_scheduler ,safety_checker=None ,feature_extractor=None ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt ,image=init_image ,mask_image=mask_image ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=generator ,output_type='np' ,)
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 67 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Tuple = '▁'
snake_case_ : List[str] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
snake_case_ : Tuple = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
snake_case_ : Union[str, Any] = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
snake_case_ : List[Any] = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self ,vocab_file ,spm_file ,src_lang=None ,tgt_lang=None ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,pad_token="<pad>" ,unk_token="<unk>" ,language_codes="m2m100" ,sp_model_kwargs: Optional[Dict[str, Any]] = None ,num_madeup_words=8 ,**kwargs ,):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
        kwargs['additional_special_tokens'] = kwargs.get('additional_special_tokens' ,[] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang ,tgt_lang=tgt_lang ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,unk_token=unk_token ,pad_token=pad_token ,language_codes=language_codes ,sp_model_kwargs=self.sp_model_kwargs ,num_madeup_words=num_madeup_words ,**kwargs ,)
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file ,self.sp_model_kwargs )
        self.encoder_size = len(self.encoder )
        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else 'en'
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.encoder ) + len(self.lang_token_to_id )

    @property
    def src_lang( self ) -> str:
        '''simple docstring'''
        return self._src_lang

    @src_lang.setter
    def src_lang( self ,new_src_lang: str ) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def _tokenize( self ,text: str ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text ,out_type=str )

    def _convert_token_to_id( self ,token: str ):
        '''simple docstring'''
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token ,self.encoder[self.unk_token] )

    def _convert_id_to_token( self ,index: int ) -> str:
        '''simple docstring'''
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index ,self.unk_token )

    def convert_tokens_to_string( self ,tokens ) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def get_special_tokens_mask(
        self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones

    def build_inputs_with_special_tokens( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        # the SentencePiece processor itself is not picklable; drop it and reload it in __setstate__
        state['sp_model'] = None
        return state

    def __setstate__( self ,d: Dict ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file ,self.sp_model_kwargs )
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(F'{save_directory} should be a directory' )
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder ,vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file ,spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path ,'wb' ) as fi:
                fi.write(self.sp_model.serialized_model_proto() )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seq2seq_batch(
        self ,src_texts: List[str] ,src_lang: str = "en" ,tgt_texts: Optional[List[str]] = None ,tgt_lang: str = "ro" ,**kwargs ,) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(src_texts ,tgt_texts ,**kwargs )

    def _build_translation_inputs( self ,raw_inputs ,src_lang: Optional[str] ,tgt_lang: Optional[str] ,**extra_kwargs ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs ,add_special_tokens=True ,**extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def _switch_to_input_mode( self ):
        '''simple docstring'''
        self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode( self ):
        '''simple docstring'''
        self.set_tgt_lang_special_tokens(self.tgt_lang )

    def set_src_lang_special_tokens( self ,src_lang: str ) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens( self ,tgt_lang: str ) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token( self ,lang: str ) -> str:
        '''simple docstring'''
        return self.lang_code_to_token[lang]

    def get_lang_id( self ,lang: str ) -> int:
        '''simple docstring'''
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm( path ,sp_model_kwargs ):
    # load a SentencePiece model from disk with the given keyword arguments
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm


def load_json( path ):
    with open(path ,'r' ) as f:
        return json.load(f )


def save_json( data ,path ):
    with open(path ,'w' ) as f:
        json.dump(data ,f ,indent=2 )
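

# --- Usage sketch (not part of the original module) ---
# A minimal round trip with the tokenizer above; assumes the
# `facebook/m2m100_418M` checkpoint from the vocab map is reachable and that
# this file is importable as the M2M100 tokenization module of `transformers`.
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     encoded = tokenizer("Hello world", return_tensors="pt")
#     # input_ids start with the `__en__` language token and end with </s>
#     print(tokenizer.convert_ids_to_tokens(encoded["input_ids"][0]))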
| 195 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = """philschmid/bart-large-cnn-samsum"""
    description = (
        """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
        """and returns a summary of the text."""
    )
    name = """summarizer"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["""text"""]
    outputs = ["""text"""]
    def encode( self ,text ):
        return self.pre_processor(text ,return_tensors="""pt""" ,truncation=True )

    def forward( self ,inputs ):
        return self.model.generate(**inputs )[0]

    def decode( self ,outputs ):
        return self.pre_processor.decode(outputs ,skip_special_tokens=True ,clean_up_tokenization_spaces=True )
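

# --- Usage sketch (not part of the original module) ---
# How the tool above is typically invoked; `TextSummarizationTool` is the name
# restored in the repair above, and the call downloads the
# `philschmid/bart-large-cnn-samsum` checkpoint on first use.
#
#     tool = TextSummarizationTool()
#     summary = tool("A very long English text ...")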
| 351 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""bert_for_seq_generation""": 512}
class BertGenerationTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self ,vocab_file ,bos_token="<s>" ,eos_token="</s>" ,unk_token="<unk>" ,pad_token="<pad>" ,sep_token="<::::>" ,sp_model_kwargs: Optional[Dict[str, Any]] = None ,**kwargs ,):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,pad_token=pad_token ,sep_token=sep_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size()

    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; drop it and reload it in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__( self ,d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,"sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize( self ,text ):
        return self.sp_model.encode(text ,out_type=str )

    def _convert_token_to_id( self ,token ):
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self ,index ):
        token = self.sp_model.IdToPiece(index )
        return token

    def convert_tokens_to_string( self ,tokens ):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,"wb" ) as fi:
                fi.write(self.sp_model.serialized_model_proto() )
        return (out_vocab_file,)
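

# --- Usage sketch (not part of the original module) ---
# Tokenizing and round-tripping a string; assumes the sentencepiece model from
# the vocab map above has been downloaded locally as `spiece.model`.
#
#     tokenizer = BertGenerationTokenizer(vocab_file="spiece.model")
#     tokens = tokenizer.tokenize("Hello world")
#     print(tokenizer.convert_tokens_to_string(tokens))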
| 351 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=1_28 ,max_relative_position=32 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[str] , a: Optional[Any] , a: Optional[Any] , a: Tuple , a: Tuple , a: Union[str, Any] , a: str) ->Any:
'''simple docstring'''
a_ = NezhaModel(config=a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a)
a_ = model(a , token_type_ids=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Any , a: int , a: Tuple , a: Dict , a: List[Any] , a: Optional[int] , a: Optional[int] , a: Optional[int] , a: List[Any] , ) ->int:
'''simple docstring'''
a_ = True
a_ = NezhaModel(a)
model.to(a)
model.eval()
a_ = model(
a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , encoder_attention_mask=a , )
a_ = model(
a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , )
a_ = model(a , attention_mask=a , token_type_ids=a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[Any] , a: Dict , a: List[Any] , a: Union[str, Any] , a: List[Any] , a: List[Any] , a: List[str]) ->List[str]:
'''simple docstring'''
a_ = NezhaForMaskedLM(config=a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: int , a: Dict , a: Dict , a: Dict , a: Dict , a: Optional[int] , a: Any , a: Union[str, Any]) ->str:
'''simple docstring'''
a_ = NezhaForNextSentencePrediction(config=a)
model.to(a)
model.eval()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def _lowerCAmelCase ( self: int , a: Optional[int] , a: int , a: Any , a: int , a: str , a: Union[str, Any] , a: str) ->int:
'''simple docstring'''
a_ = NezhaForPreTraining(config=a)
model.to(a)
model.eval()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , next_sentence_label=a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: str , a: Dict , a: int , a: int , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = NezhaForQuestionAnswering(config=a)
model.to(a)
model.eval()
a_ = model(
a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: List[Any] , a: Optional[Any] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Dict , a: Dict) ->Dict:
'''simple docstring'''
a_ = self.num_labels
a_ = NezhaForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCAmelCase ( self: Any , a: List[str] , a: Optional[Any] , a: Any , a: Optional[Any] , a: Optional[Any] , a: List[Any] , a: Optional[int]) ->str:
'''simple docstring'''
a_ = self.num_labels
a_ = NezhaForTokenClassification(config=a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: List[Any] , a: List[str] , a: int , a: List[Any] , a: Optional[Any] , a: Optional[Any] , a: int , a: Dict) ->Tuple:
'''simple docstring'''
a_ = self.num_choices
a_ = NezhaForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
def _lowerCAmelCase ( self: Tuple) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*a)
    def test_model_as_decoder_with_default_input_mask( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def _lowerCAmelCase ( self: Optional[int]) ->str:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a)
def _lowerCAmelCase ( self: int) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a)
def _lowerCAmelCase ( self: Optional[int]) ->str:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*a)
def _lowerCAmelCase ( self: Any) ->int:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a)
def _lowerCAmelCase ( self: str) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a)
def _lowerCAmelCase ( self: List[str]) ->List[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a)
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a)
@slow
def _lowerCAmelCase ( self: Tuple) ->Tuple:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = NezhaModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
    def test_torchscript_device_change( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs["input_ids"].to("cpu"), inputs["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp , "bert.pt") , map_location=torch_device)
                loaded(inputs["input_ids"].to(torch_device) , inputs["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_nezha_model( self ):
        '''simple docstring'''
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 7_68))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4))
@slow
    def test_inference_nezha_masked_lm( self ):
        '''simple docstring'''
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 2_11_28))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4))
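

# --- Running these tests (note added for context) ---
# The suite above follows the standard `transformers` test layout, so a single
# test can be selected with pytest's `-k` filter, e.g.
#
#     pytest path/to/test_modeling_nezha.py -k "test_inference_nezha_model"
#
# The file path is an assumption; it depends on where this module lives in the
# checkout.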
| 685 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    '''simple docstring'''
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data )
    return dataset
class MakeDuplicateClustersTest( TestCase ):
    def test_make_duplicate_clusters( self ):
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85)
        self.assertEqual(len(duplicate_clusters[0]) , 2)
    def test_deduplicate_dataset( self ):
        '''simple docstring'''
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter) , 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , True)
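
    # Why these fixtures form duplicates (note added for context): "a " * 20 and
    # "a " * 30 share almost all of their shingles, so their MinHash signatures
    # collide above the 0.85 Jaccard threshold used in the first test, while
    # "b " * 7 does not. That is why exactly one cluster of two files is
    # expected and the deduplicated dataset keeps two rows.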
| 685 | 1 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule( FlaxBigBirdForQuestionAnsweringModule ):
    '''simple docstring'''
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup( self ):
        super().setup()
        # extra 5-way classification head on top of the QA module
        self.cls = nn.Dense(5 , dtype=self.dtype )

    def __call__( self , *args , **kwargs ):
        outputs = super().__call__(*args , **kwargs )
        cls_out = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions( FlaxBigBirdForQuestionAnswering ):
    '''simple docstring'''
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels ):
    def cross_entropy(logits , labels , reduction=None ):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes )[None]).astype('f4' )
        logits = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss

    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooled_labels )
    return (start_loss + end_loss + pooled_loss) / 3
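

# --- Sanity-check sketch (not part of the original script) ---
# A quick way to probe `calculate_loss_for_nq` with dummy values; the shapes
# are assumptions chosen only to make the call well-formed.
#
#     logits = jnp.zeros((2, 8, 5))                     # (batch, seq, classes)
#     labels = jnp.zeros((2, 8), dtype=jnp.int32)
#     pooled_logits = jnp.zeros((2, 5))
#     pooled_labels = jnp.zeros((2,), dtype=jnp.int32)
#     loss = calculate_loss_for_nq(logits, labels, logits, labels, pooled_logits, pooled_labels)
#     print(loss)  # uniform logits give log(5) ~ 1.609 at every position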
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 30_00
    save_steps: int = 1_05_00

    block_size: int = 1_28
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 2_00_00
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__( self ):
        os.makedirs(self.base_dir , exist_ok=True )
        self.save_dir = os.path.join(self.base_dir , self.save_dir )
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 40_96  # no dynamic padding on TPUs

    def __call__( self , batch ):
        batch = self.collate_fn(batch )
        batch = jax.tree_util.tree_map(shard , batch )
        return batch

    def collate_fn( self , features ):
        input_ids, attention_mask = self.fetch_inputs(features['input_ids'] )
        batch = {
            'input_ids': jnp.array(input_ids , dtype=jnp.int32 ),
            'attention_mask': jnp.array(attention_mask , dtype=jnp.int32 ),
            'start_labels': jnp.array(features['start_token'] , dtype=jnp.int32 ),
            'end_labels': jnp.array(features['end_token'] , dtype=jnp.int32 ),
            'pooled_labels': jnp.array(features['category'] , dtype=jnp.int32 ),
        }
        return batch

    def fetch_inputs( self , input_ids: list ):
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )

    def _fetch_inputs( self , input_ids: list ):
        # right-pad to max_length with pad_id and mark padding in the attention mask
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def get_batched_dataset(dataset , batch_size , seed=None ):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
@partial(jax.pmap , axis_name='batch' )
def train_step(state , drp_rng , **model_inputs ):
    def loss_fn(params ):
        start_labels = model_inputs.pop('start_labels' )
        end_labels = model_inputs.pop('end_labels' )
        pooled_labels = model_inputs.pop('pooled_labels' )

        outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )

    drp_rng, new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss, grads = grad_fn(state.params )
    metrics = jax.lax.pmean({'loss': loss} , axis_name='batch' )
    grads = jax.lax.pmean(grads , 'batch' )

    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch' )
def val_step(state , **model_inputs ):
    start_labels = model_inputs.pop('start_labels' )
    end_labels = model_inputs.pop('end_labels' )
    pooled_labels = model_inputs.pop('pooled_labels' )

    outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
    metrics = jax.lax.pmean({'loss': loss} , axis_name='batch' )
    return metrics
class TrainState( train_state.TrainState ):
    '''simple docstring'''
    loss_fn: Callable = struct.field(pytree_node=False )
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state( self , model , tx , num_train_steps , ckpt_dir=None ):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__ , params=params , tx=tx , loss_fn=calculate_loss_for_nq , )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir , state )
            tx_args = {
                'lr': args.lr,
                'init_lr': args.init_lr,
                'warmup_steps': args.warmup_steps,
                'num_train_steps': num_train_steps,
                'weight_decay': args.weight_decay,
            }
            tx, lr = build_tx(**tx_args )
            state = train_state.TrainState(
                step=step , apply_fn=model.__call__ , params=params , tx=tx , opt_state=opt_state , )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state )
        return state
    def train( self , state , tr_dataset , val_dataset ):
        args = self.args
        total = len(tr_dataset ) // args.batch_size

        rng = jax.random.PRNGKey(0 )
        drp_rng = jax.random.split(rng , jax.device_count() )
        for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0 , dtype=jnp.float32 )
            tr_dataloader = get_batched_dataset(tr_dataset , args.batch_size , seed=epoch )
            i = 0
            for batch in tqdm(tr_dataloader , total=total , desc=F'''Running EPOCH-{epoch}''' ):
                batch = self.data_collator(batch )
                state, metrics, drp_rng = self.train_step_fn(state , drp_rng , **batch )
                running_loss += jax_utils.unreplicate(metrics['loss'] )
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step )
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1 )

                    eval_loss = self.evaluate(state , val_dataset )
                    logging_dict = {
                        'step': state_step.item(),
                        'eval_loss': eval_loss.item(),
                        'tr_loss': tr_loss,
                        'lr': lr.item(),
                    }
                    tqdm.write(str(logging_dict ) )
                    self.logger.log(logging_dict , commit=True )

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=state )
    def evaluate( self , state , dataset ):
        dataloader = get_batched_dataset(dataset , self.args.batch_size )
        total = len(dataset ) // self.args.batch_size
        running_loss = jnp.array(0 , dtype=jnp.float32 )
        i = 0
        for batch in tqdm(dataloader , total=total , desc='Evaluating ... ' ):
            batch = self.data_collator(batch )
            metrics = self.val_step_fn(state , **batch )
            running_loss += jax_utils.unreplicate(metrics['loss'] )
            i += 1
        return running_loss / i
    def save_checkpoint( self , save_dir , state ):
        state = jax_utils.unreplicate(state )
        print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=' ... ' )
        self.model_save_fn(save_dir , params=state.params )
        with open(os.path.join(save_dir , 'opt_state.msgpack' ) , 'wb' ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(save_dir , 'args.joblib' ) )
        joblib.dump(self.data_collator , os.path.join(save_dir , 'data_collator.joblib' ) )
        with open(os.path.join(save_dir , 'training_state.json' ) , 'w' ) as f:
            json.dump({'step': state.step.item()} , f )
        print('DONE' )
def restore_checkpoint(save_dir , state ):
    print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=' ... ' )
    with open(os.path.join(save_dir , 'flax_model.msgpack' ) , 'rb' ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , 'opt_state.msgpack' ) , 'rb' ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , 'args.joblib' ) )
    data_collator = joblib.load(os.path.join(save_dir , 'data_collator.joblib' ) )
    with open(os.path.join(save_dir , 'training_state.json' ) , 'r' ) as f:
        training_state = json.load(f )
    step = training_state['step']
    print('DONE' )
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr , init_lr , warmup_steps , num_train_steps ):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1E-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
def build_tx(lr , init_lr , warmup_steps , num_train_steps , weight_decay ):
    def weight_decay_mask(params ):
        params = traverse_util.flatten_dict(params )
        # decay everything except biases and LayerNorm scales (keyed by the flattened path)
        mask = {k: (k[-1] != 'bias' and k[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask )

    lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr
| 492 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """realm"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 492 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    '''simple docstring'''
    def __init__( self , parent , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = '''last'''
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertModel(config=A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
SCREAMING_SNAKE_CASE__ = model(A_ )
SCREAMING_SNAKE_CASE__ = [input_ids, input_mask]
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertWithLMHeadModel(A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertForQuestionAnsweringSimple(A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertForSequenceClassification(A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = TFFlaubertForTokenClassification(config=A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.num_choices
SCREAMING_SNAKE_CASE__ = TFFlaubertForMultipleChoice(config=A_ )
SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''langs''': token_type_ids,
            '''lengths''': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
@slow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
            [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
SCREAMING_SNAKE_CASE__ = model(A_ )[0]
SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
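

# --- Usage sketch (not part of the original test file) ---
# Reproducing the integration check interactively; the checkpoint id comes
# from the test above.
#
#     import tensorflow as tf
#     from transformers import TFFlaubertModel
#     model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
#     input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]])  # "J'aime flaubert !"
#     hidden = model(input_ids)[0]
#     print(hidden.shape)  # (1, 8, 512)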
| 100 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool( PipelineTool ):
    default_checkpoint = """dandelin/vilt-b32-finetuned-vqa"""
    description = (
        """This is a tool that answers a question about an image. It takes an input named `image` which should be the """
        """image containing the information, as well as a `question` which should be the question in English. It """
        """returns a text that is the answer to the question."""
    )
    name = """image_qa"""
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["""image""", """text"""]
    outputs = ["""text"""]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['vision'] )
        super().__init__(*args , **kwargs )

    def encode( self , image: "Image" , question: str ):
        """simple docstring"""
        return self.pre_processor(image , question , return_tensors='pt' )

    def forward( self , inputs ):
        """simple docstring"""
        with torch.no_grad():
            return self.model(**inputs ).logits

    def decode( self , outputs ):
        """simple docstring"""
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
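

# --- Usage sketch (not part of the original module) ---
# The tool is called with a PIL image and a question; the checkpoint id is the
# `dandelin/vilt-b32-finetuned-vqa` default declared above.
#
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(Image.open("photo.jpg"), "How many dogs are there?")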
| 495 | 0 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict ) -> set:
    """
    Greedy APX-Algorithm for finding a Minimum Vertex Cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 285 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
) | 285 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
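
# Usage sketch (added for illustration; values are arbitrary examples):
#     config = SegformerConfig(num_encoder_blocks=4, depths=[3, 4, 6, 3])
#     onnx_config = SegformerOnnxConfig(config)
#     assert "pixel_values" in onnx_config.inputs
# The per-stage lists (depths, sr_ratios, hidden_sizes, patch_sizes, strides,
# num_attention_heads, mlp_ratios) are each expected to provide one entry per
# encoder block.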
| 609 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the resolve URL of a file inside a dataset repository on the Hub."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
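
# Illustrative usage (added; repo and file names are hypothetical):
#     hf_hub_url("user/my_dataset", "data/train.csv", revision="main")
# yields a hf.co/datasets/.../resolve/main/... URL. On huggingface_hub >= 0.11
# the path is url-encoded by hf_hub_url itself, hence the version guard above.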
| 547 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
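
    # Illustrative sketch (added): the backbone can also be given as a plain
    # dict carrying a "model_type" key, e.g.
    #     config = UperNetConfig(backbone_config={"model_type": "convnext"})
    #     assert config.backbone_config.model_type == "convnext"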
    def to_dict(self):
        """Serializes this instance to a Python dictionary, overriding the default `to_dict`."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 705 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = tempfile.mkdtemp()
# fmt: off
lowerCamelCase_ = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
lowerCamelCase_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
lowerCamelCase_ = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
lowerCamelCase_ = {'''unk_token''': '''<unk>'''}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def _lowerCAmelCase ( self , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowerCAmelCase ( self , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowerCAmelCase ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
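
    # Note added for illustration: the helper above builds random uint8 arrays
    # in (channels, height, width) order, and np.moveaxis(x, 0, -1) converts
    # them to the (height, width, channels) layout PIL.Image.fromarray expects,
    # e.g. (3, 30, 400) -> (30, 400, 3).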
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
lowerCamelCase_ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCamelCase__ , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCamelCase__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCamelCase__ )
lowerCamelCase_ = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCamelCase__ )
lowerCamelCase_ = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 66 | 0 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    """Computes the SHA-256 hash of a given bytestring."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0X6A_09_E6_67,
0XBB_67_AE_85,
0X3C_6E_F3_72,
0XA5_4F_F5_3A,
0X51_0E_52_7F,
0X9B_05_68_8C,
0X1F_83_D9_AB,
0X5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0X42_8A_2F_98,
0X71_37_44_91,
0XB5_C0_FB_CF,
0XE9_B5_DB_A5,
0X39_56_C2_5B,
0X59_F1_11_F1,
0X92_3F_82_A4,
0XAB_1C_5E_D5,
0XD8_07_AA_98,
0X12_83_5B_01,
0X24_31_85_BE,
0X55_0C_7D_C3,
0X72_BE_5D_74,
0X80_DE_B1_FE,
0X9B_DC_06_A7,
0XC1_9B_F1_74,
0XE4_9B_69_C1,
0XEF_BE_47_86,
0X0F_C1_9D_C6,
0X24_0C_A1_CC,
0X2D_E9_2C_6F,
0X4A_74_84_AA,
0X5C_B0_A9_DC,
0X76_F9_88_DA,
0X98_3E_51_52,
0XA8_31_C6_6D,
0XB0_03_27_C8,
0XBF_59_7F_C7,
0XC6_E0_0B_F3,
0XD5_A7_91_47,
0X06_CA_63_51,
0X14_29_29_67,
0X27_B7_0A_85,
0X2E_1B_21_38,
0X4D_2C_6D_FC,
0X53_38_0D_13,
0X65_0A_73_54,
0X76_6A_0A_BB,
0X81_C2_C9_2E,
0X92_72_2C_85,
0XA2_BF_E8_A1,
0XA8_1A_66_4B,
0XC2_4B_8B_70,
0XC7_6C_51_A3,
0XD1_92_E8_19,
0XD6_99_06_24,
0XF4_0E_35_85,
0X10_6A_A0_70,
0X19_A4_C1_16,
0X1E_37_6C_08,
0X27_48_77_4C,
0X34_B0_BC_B5,
0X39_1C_0C_B3,
0X4E_D8_AA_4A,
0X5B_9C_CA_4F,
0X68_2E_6F_F3,
0X74_8F_82_EE,
0X78_A5_63_6F,
0X84_C8_78_14,
0X8C_C7_02_08,
0X90_BE_FF_FA,
0XA4_50_6C_EB,
0XBE_F9_A3_F7,
0XC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
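
    # Added worked example: for b"abc" (3 bytes) the padding is one 0x80 byte
    # plus 52 zero bytes, and the big-endian length field packs 24 (bits), so
    # 3 + 53 + 8 = 64 -- the message always becomes a whole number of
    # 64-byte blocks.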
    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 zeroed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zeroed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value by the given number of bit positions."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
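
# Added illustration of the 32-bit rotate-right above: with value=1 and
# rotations=1 the low bit wraps around to the top, so
#     SHA256(b"").ror(1, 1) == 0x80_00_00_00
# (the 0xFF_FF_FF_FF mask keeps the shifted-left part inside 32 bits).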
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """
    Provides the option to hash a string (default) or a file,
    and prints the calculated SHA-256 hash.
    """
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 535 |
"""simple docstring"""
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = DownBlockaD # noqa F405
lowercase = '''down'''
def UpperCAmelCase (self : int ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(SCREAMING_SNAKE_CASE_ )
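
# Context note (added): every block test below follows the same pattern -- the
# class declares a block type and an "up"/"down"/"mid" placement, and
# super().test_output (provided by the UNetBlockTesterMixin imported above) is
# expected to run a fixed-seed forward pass and compare a 9-value slice of the
# output against the hard-coded expectations.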
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = ResnetDownsampleBlockaD # noqa F405
lowercase = '''down'''
def UpperCAmelCase (self : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnDownBlockaD # noqa F405
lowercase = '''down'''
def UpperCAmelCase (self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = CrossAttnDownBlockaD # noqa F405
lowercase = '''down'''
def UpperCAmelCase (self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
def UpperCAmelCase (self : List[Any] ) -> str:
"""simple docstring"""
lowerCAmelCase = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = SimpleCrossAttnDownBlockaD # noqa F405
lowercase = '''down'''
@property
def UpperCAmelCase (self : Dict ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : List[Any] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' ,'''MPS result is not consistent''' )
def UpperCAmelCase (self : str ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = SkipDownBlockaD # noqa F405
lowercase = '''down'''
@property
def UpperCAmelCase (self : List[Any] ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Optional[int] ) -> int:
"""simple docstring"""
lowerCAmelCase = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnSkipDownBlockaD # noqa F405
lowercase = '''down'''
@property
def UpperCAmelCase (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Any ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = DownEncoderBlockaD # noqa F405
lowercase = '''down'''
@property
def UpperCAmelCase (self : int ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : int ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase (self : str ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnDownEncoderBlockaD # noqa F405
lowercase = '''down'''
@property
def UpperCAmelCase (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase (self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = UNetMidBlockaD # noqa F405
lowercase = '''mid'''
def UpperCAmelCase (self : List[Any] ) -> str:
"""simple docstring"""
lowerCAmelCase = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase (self : Optional[Any] ) -> str:
"""simple docstring"""
lowerCAmelCase = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = UNetMidBlockaDCrossAttn # noqa F405
lowercase = '''mid'''
def UpperCAmelCase (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
def UpperCAmelCase (self : Any ) -> Dict:
"""simple docstring"""
lowerCAmelCase = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowercase = '''mid'''
@property
def UpperCAmelCase (self : Union[str, Any] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : int ) -> int:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
def UpperCAmelCase (self : str ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = UpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : int ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = ResnetUpsampleBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Optional[int] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Dict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = CrossAttnUpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
def UpperCAmelCase (self : str ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = SimpleCrossAttnUpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Any ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ ,include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Union[str, Any] ) -> int:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
def UpperCAmelCase (self : Tuple ) -> Any:
"""simple docstring"""
lowerCAmelCase = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnUpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : List[str] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
@unittest.skipIf(torch_device == '''mps''' ,'''MPS result is not consistent''' )
def UpperCAmelCase (self : int ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = SkipUpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Optional[int] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : str ) -> int:
"""simple docstring"""
lowerCAmelCase = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnSkipUpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Optional[Any] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Any ) -> int:
"""simple docstring"""
lowerCAmelCase = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = UpDecoderBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : List[str] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = {'''in_channels''': 32, '''out_channels''': 32}
lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnUpDecoderBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Any ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = {'''in_channels''': 32, '''out_channels''': 32}
lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(SCREAMING_SNAKE_CASE_ )
| 535 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers: a 2D convolution
    followed by batch norm and a ReLU activation.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) as used in PSPNet.

    Args:
        pool_scales (`Tuple[int]`): Pooling scales used in the module.
        in_channels (`int`): Input channels.
        channels (`int`): Channels after the module.
        align_corners (`bool`): align_corners argument of F.interpolate.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
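
# Added note: each pyramid block above pools the feature map down to a
# pool_scale x pool_scale grid (1x1, 2x2, 3x3, 6x6 by default), projects it
# with a 1x1 conv, then bilinearly upsamples back to the input resolution so
# the multi-scale context maps can be concatenated channel-wise with the
# original features.
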
class UperNetHead(nn.Module):
    """
    UperNet decode head: fuses a Pyramid Pooling Module with an FPN over the backbone feature maps.
    """

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
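
# Added note: the head fuses features FPN-style -- lateral 1x1 convs align
# channel counts, the top-down loop upsamples-and-adds coarser levels into
# finer ones, every level is then resized to the finest resolution,
# concatenated, squeezed by fpn_bottleneck, and finally mapped to per-pixel
# class logits by the 1x1 classifier.
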
class UperNetFCNHead(nn.Module):
    """
    Fully Convolutional Network auxiliary head, applied to one intermediate backbone feature map.
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 565 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
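
    # Note added for context: binding the module to a _LazyModule defers the
    # heavy torch/TF imports until an attribute is first accessed; during
    # static type checking the TYPE_CHECKING branch above supplies the real
    # symbols instead.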
| 565 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
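
    # Note added for context: xmp.spawn forks `nprocs` TPU processes, each
    # calling the user script's `_mp_fn(index)` entry point with the patched
    # sys.argv -- the TPU analogue of torch.distributed.launch.
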
if __name__ == "__main__":
main()
| 568 |
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    if any(i <= 0 for i in nums):
        raise ValueError('All values must be greater than 0')
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
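
# Added worked example: for sides [1, 2, 3] the largest side (3) equals the
# sum of the rest (1 + 2), so no area can be enclosed and check_polygon
# returns False; for [2, 3, 4] it returns True because 4 < 2 + 3.
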
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53 | 0 |
from manim import *
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = Rectangle(height=0.5 ,width=0.5 )
SCREAMING_SNAKE_CASE_ : List[str] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE_ : List[str] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ : str = VGroup(*snake_case__ ).arrange(snake_case__ ,buff=0 )
SCREAMING_SNAKE_CASE_ : Any = VGroup(*snake_case__ ).arrange(snake_case__ ,buff=0 )
SCREAMING_SNAKE_CASE_ : Any = VGroup(snake_case__ ,snake_case__ ).arrange(snake_case__ ,buff=0 )
SCREAMING_SNAKE_CASE_ : int = Text('CPU' ,font_size=24 )
SCREAMING_SNAKE_CASE_ : Dict = Group(snake_case__ ,snake_case__ ).arrange(snake_case__ ,buff=0.5 ,aligned_edge=snake_case__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = [mem.copy() for i in range(1 )]
SCREAMING_SNAKE_CASE_ : Tuple = VGroup(*snake_case__ ).arrange(snake_case__ ,buff=0 )
SCREAMING_SNAKE_CASE_ : Tuple = Text('GPU' ,font_size=24 )
SCREAMING_SNAKE_CASE_ : List[Any] = Group(snake_case__ ,snake_case__ ).arrange(snake_case__ ,buff=0.5 ,aligned_edge=snake_case__ )
gpu.align_to(snake_case__ ,snake_case__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ : Dict = VGroup(*snake_case__ ).arrange(snake_case__ ,buff=0 )
SCREAMING_SNAKE_CASE_ : List[str] = Text('Model' ,font_size=24 )
SCREAMING_SNAKE_CASE_ : Any = Group(snake_case__ ,snake_case__ ).arrange(snake_case__ ,buff=0.5 ,aligned_edge=snake_case__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case__ ,run_time=1 ) ,Create(snake_case__ ,run_time=1 ) ,Create(snake_case__ ,run_time=1 ) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' ,font_size=24 ,)
SCREAMING_SNAKE_CASE_ : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE_ : int = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case__ ,run_time=2.5 ) ,Write(snake_case__ ) ,Write(snake_case__ ) )
self.add(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : Dict = []
SCREAMING_SNAKE_CASE_ : Tuple = []
for i, rect in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case__ ,opacity=0.7 )
cpu_target.move_to(snake_case__ )
cpu_target.generate_target()
SCREAMING_SNAKE_CASE_ : int = 0.46 / 4
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=snake_case__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=snake_case__ ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=snake_case__ ,buff=0.0 )
cpu_targs.append(snake_case__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case__ ) )
second_animations.append(MoveToTarget(snake_case__ ,run_time=1.5 ) )
self.play(*snake_case__ )
self.play(*snake_case__ )
self.wait()
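        # Note added for context: the two animation batches played above first
        # recolour each model rectangle's stroke, then MoveToTarget slides its
        # translucent copy onto the CPU rows -- visualising checkpoint weights
        # landing in CPU RAM while the GPU block stays empty.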
| 704 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : str
__a : str
__a : Optional[str] = None
__a : Optional[str] = None
__a : Optional[str] = None
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : List[int]
__a : Optional[List[int]] = None
__a : Optional[List[int]] = None
__a : Optional[Union[int, float]] = None
__a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example.guid}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
| 685 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
if isinstance(__A , torch.Tensor):
return image
elif isinstance(__A , PIL.Image.Image):
_a = [image]
if isinstance(image[0] , PIL.Image.Image):
_a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image]
_a = np.concatenate(__A , axis=0)
_a = np.array(__A).astype(np.floataa) / 2_55.0
_a = image.transpose(0 , 3 , 1 , 2)
_a = 2.0 * image - 1.0
_a = torch.from_numpy(__A)
elif isinstance(image[0] , torch.Tensor):
_a = torch.cat(__A , dim=0)
return image
def lowerCAmelCase (__A , __A , __A , __A=0.99_95):
"""simple docstring"""
if not isinstance(__A , np.ndarray):
_a = True
_a = va.device
_a = va.cpu().numpy()
_a = va.cpu().numpy()
_a = np.sum(va * va / (np.linalg.norm(__A) * np.linalg.norm(__A)))
if np.abs(__A) > DOT_THRESHOLD:
_a = (1 - t) * va + t * va
else:
_a = np.arccos(__A)
_a = np.sin(__A)
_a = theta_a * t
_a = np.sin(__A)
_a = np.sin(theta_a - theta_t) / sin_theta_a
_a = sin_theta_t / sin_theta_a
_a = sa * va + sa * va
if inputs_are_torch:
_a = torch.from_numpy(__A).to(__A)
return va
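# The helper above is slerp (spherical linear interpolation):
#   v(t) = (sin((1 - t) * theta) * v0 + sin(t * theta) * v1) / sin(theta),
# with theta the angle between v0 and v1. When |cos(theta)| exceeds the given
# threshold (0.9995 by default) the vectors are nearly parallel and plain
# linear interpolation is used instead. It is used below to blend content and
# style embeddings.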
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = F.normalize(__A , dim=-1)
_a = F.normalize(__A , dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
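# Squared angular distance between unit vectors: with theta the angle between
# x and y, ||x - y|| = 2 * sin(theta / 2), so the expression above evaluates
# to theta^2 / 2.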
def lowerCAmelCase (__A , __A):
"""simple docstring"""
for param in model.parameters():
_a = value
class __A ( A ):
'''simple docstring'''
def __init__(self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=A , text_encoder=A , clip_model=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , coca_model=A , coca_tokenizer=A , coca_transform=A , )
_a = (
feature_extractor.size
if isinstance(feature_extractor.size , A )
else feature_extractor.size['''shortest_edge''']
)
_a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , A )
set_requires_grad(self.clip_model , A )
def a__ (self , A = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
self.enable_attention_slicing(A )
def a__ (self ) -> int:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Dict:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self ) -> str:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self , A , A , A ) -> Optional[Any]:
"""simple docstring"""
_a = min(int(num_inference_steps * strength ) , A )
_a = max(num_inference_steps - init_timestep , 0 )
_a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
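# Only roughly int(strength * num_inference_steps) of the schedule's final steps
# are run: strength near 1 re-noises the init image almost completely, strength
# near 0 keeps it largely intact.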
def a__ (self , A , A , A , A , A , A=None ) -> List[str]:
"""simple docstring"""
if not isinstance(A , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(A )}''' )
_a = image.to(device=A , dtype=A )
if isinstance(A , A ):
_a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A )
]
_a = torch.cat(A , dim=0 )
else:
_a = self.vae.encode(A ).latent_dist.sample(A )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 0.18215 * init_latents
_a = init_latents.repeat_interleave(A , dim=0 )
_a = randn_tensor(init_latents.shape , generator=A , device=A , dtype=A )
# get latents
_a = self.scheduler.add_noise(A , A , A )
_a = init_latents
return latents
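# prepare_latents above: VAE-encode the image, scale it, repeat it once per
# generation, then add scheduler noise at the chosen start timestep so that
# denoising resumes from the input image rather than from pure Gaussian noise.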
def a__ (self , A ) -> Tuple:
"""simple docstring"""
_a = self.coca_transform(A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def a__ (self , A , A ) -> List[Any]:
"""simple docstring"""
_a = self.feature_extractor.preprocess(A )
_a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = image_embeddings_clip.repeat_interleave(A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def a__ (self , A , A , A , A , A , A , A , ) -> Union[str, Any]:
"""simple docstring"""
_a = latents.detach().requires_grad_()
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_a = self.scheduler.alphas_cumprod[timestep]
_a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
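# The line above computes x_0 = (x_t - sqrt(1 - alpha_bar_t) * eps_theta) / sqrt(alpha_bar_t).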
_a = torch.sqrt(A )
_a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , A ):
_a = self.scheduler.sigmas[index]
_a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 1 / 0.18215 * sample
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = transforms.Resize(self.feature_extractor_size )(A )
_a = self.normalize(A ).to(latents.dtype )
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = spherical_dist_loss(A , A ).mean() * clip_guidance_scale
_a = -torch.autograd.grad(A , A )[0]
if isinstance(self.scheduler , A ):
_a = latents.detach() + grads * (sigma**2)
_a = noise_pred_original
else:
_a = noise_pred_original - torch.sqrt(A ) * grads
return noise_pred, latents
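# cond_fn above implements CLIP guidance: decode the predicted x_0, embed it
# with the CLIP image encoder, and nudge the noise prediction along the
# gradient of the spherical distance to the target embedding.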
@torch.no_grad()
def __call__(self , A , A , A = None , A = None , A = 512 , A = 512 , A = 0.6 , A = 50 , A = 7.5 , A = 1 , A = 0.0 , A = 100 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> str:
"""simple docstring"""
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(A )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(A , torch.Generator ) and batch_size > 1:
_a = [generator] + [None] * (batch_size - 1)
_a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_a = [x[0] for x in coca_is_none if x[1]]
_a = ''', '''.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
# get prompt text embeddings for content and style
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_a = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
_a = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
_a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_a = {}
if accepts_offset:
_a = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_a , _a = self.get_timesteps(A , A , self.device )
_a = timesteps[:1].repeat(A )
# Preprocess image
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = slerp(A , A , A )
if clip_guidance_scale > 0:
_a = self.get_clip_image_embeddings(A , A )
_a = self.get_clip_image_embeddings(A , A )
_a = slerp(
A , A , A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
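# The guided prediction computed below is
#   eps = eps_uncond + guidance_scale * (eps_text - eps_uncond).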
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = content_text_input.input_ids.shape[-1]
_a = self.tokenizer([''''''] , padding='''max_length''' , max_length=A , return_tensors='''pt''' )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_a = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_a = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to(
self.device )
else:
_a = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
# check if the scheduler accepts generator
_a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_a = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_a , _a = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(A , A , A , **A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 1 / 0.18215 * latents
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 11 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( A , A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = IFInpaintingSuperResolutionPipeline
__lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__lowerCamelCase : str = PipelineTesterMixin.required_optional_params - {'latents'}
def a__ (self ) -> List[Any]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def a__ (self , A , A=0 ) -> List[Any]:
"""simple docstring"""
if str(A ).startswith('''mps''' ):
_a = torch.manual_seed(A )
else:
_a = torch.Generator(device=A ).manual_seed(A )
_a = floats_tensor((1, 3, 16, 16) , rng=random.Random(A ) ).to(A )
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
_a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def a__ (self ) -> str:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def a__ (self ) -> str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def a__ (self ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def a__ (self ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 11 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model'}
A__ : List[Any] = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
A__ : List[str] = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
A__ : List[str] = '▁'
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , ) -> None:
"""simple docstring"""
_lowercase: Optional[int] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
_lowercase: Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
_lowercase: Optional[Any] = vocab_file
_lowercase: Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
_lowercase: Union[str, Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
_lowercase: Dict = len(self.sp_model ) - 1
_lowercase: Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowercase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowercase: Any = [self.cls_token_id]
_lowercase: Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
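# Sequence-pair format above follows the CamemBERT/RoBERTa convention:
# <s> A </s></s> B </s>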
def lowercase_ ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def lowercase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
_lowercase: Optional[int] = [self.sep_token_id]
_lowercase: List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: int = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(A_ , out_type=A_ )
def lowercase_ ( self , A_ ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowercase: Optional[int] = self.sp_model.PieceToId(A_ )
return spm_id if spm_id else self.unk_token_id
def lowercase_ ( self , A_ ) -> List[Any]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(A_ )
def lowercase_ ( self , A_ ) -> Dict:
"""simple docstring"""
_lowercase: Optional[Any] = []
_lowercase: List[Any] = ''''''
_lowercase: Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A_ ) + token
_lowercase: int = True
_lowercase: Tuple = []
else:
current_sub_tokens.append(A_ )
_lowercase: List[Any] = False
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def __getstate__( self ) -> Tuple:
"""simple docstring"""
_lowercase: Dict = self.__dict__.copy()
_lowercase: str = None
return state
def __setstate__( self , A_ ) -> Tuple:
"""simple docstring"""
_lowercase: Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowercase: str = {}
_lowercase: Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowercase: Tuple = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , '''wb''' ) as fi:
_lowercase: str = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
| 272 |
"""simple docstring"""
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number | (1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number & ~(1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number ^ (1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
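# A quick worked example of the five helpers above (set, clear, flip, test, get):
#   13 | (1 << 1)             -> 15    (set bit 1 of 0b1101)
#   15 & ~(1 << 2)            -> 11    (clear bit 2 of 0b1111)
#   13 ^ (1 << 2)             -> 9     (flip bit 2 of 0b1101)
#   ((10 >> 3) & 1) == 1      -> True  (bit 3 of 0b1010 is set)
#   int((10 & (1 << 1)) != 0) -> 1     (get bit 1 of 0b1010)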
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 | 1 |
'''simple docstring'''
lowercase__ : Tuple = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
lowercase__ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def _lowerCAmelCase ( __snake_case : str ) -> str:
__A : Optional[int] = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def _lowerCAmelCase ( __snake_case : str ) -> str:
if set(__snake_case ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
__A : str = ''
for word in coded.split():
while len(__snake_case ) != 0:
decoded += decode_dict[word[:5]]
__A : Dict = word[5:]
decoded += " "
return decoded.strip()
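# Example round trip for the Bacon-style cipher above, where each letter maps
# to five A/B symbols:
#   encode("hello") -> "AABBBAABAAABABAABABAABBAB"
#   decode("AABBBAABAAABABAABABAABBAB") -> "hello"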
if __name__ == "__main__":
from doctest import testmod
testmod()
| 8 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__a : List[str] = random.Random()
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=1.0 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
if rng is None:
lowercase__ : Optional[Any] = global_rng
lowercase__ : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=80 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase="hann_window" , lowerCamelCase=80 , lowerCamelCase=7600 , lowerCamelCase=1E-10 , lowerCamelCase=True , ) -> int:
"""simple docstring"""
lowercase__ : Optional[int] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Dict = min_seq_length
lowercase__ : Optional[int] = max_seq_length
lowercase__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase__ : List[Any] = feature_size
lowercase__ : Union[str, Any] = padding_value
lowercase__ : Dict = sampling_rate
lowercase__ : int = do_normalize
lowercase__ : Union[str, Any] = num_mel_bins
lowercase__ : Optional[Any] = hop_length
lowercase__ : Tuple = win_length
lowercase__ : Any = win_function
lowercase__ : Optional[Any] = fmin
lowercase__ : str = fmax
lowercase__ : Union[str, Any] = mel_floor
lowercase__ : str = return_attention_mask
def __a ( self ) -> Any:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __a ( self , lowerCamelCase=False , lowerCamelCase=False ) -> List[str]:
"""simple docstring"""
def _flatten(lowerCamelCase ):
return list(itertools.chain(*lowerCamelCase ) )
if equal_length:
lowercase__ : Optional[int] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowercase__ : List[str] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase__ : Dict = [np.asarray(lowerCamelCase ) for x in speech_inputs]
return speech_inputs
def __a ( self , lowerCamelCase=False , lowerCamelCase=False ) -> Optional[int]:
"""simple docstring"""
if equal_length:
lowercase__ : Union[str, Any] = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase__ : Tuple = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase__ : List[str] = [np.asarray(lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class UpperCAmelCase( snake_case_ , unittest.TestCase ):
"""simple docstring"""
a : List[Any] = SpeechTaFeatureExtractor
def __a ( self ) -> Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any] = SpeechTaFeatureExtractionTester(self )
def __a ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowerCamelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase , axis=0 ) - 1 ) < 1E-3 ) )
def __a ( self ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : str = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
lowercase__ : int = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
lowercase__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test batched
lowercase__ : Optional[int] = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
lowercase__ : Union[str, Any] = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : Any = ["longest", "max_length", "do_not_pad"]
lowercase__ : List[Any] = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase , lowerCamelCase ):
lowercase__ : Optional[int] = feat_extract(lowerCamelCase , padding=lowerCamelCase , max_length=lowerCamelCase , return_tensors="np" )
lowercase__ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : Dict = range(800 , 1400 , 200 )
lowercase__ : List[str] = [floats_list((1, x) )[0] for x in lengths]
lowercase__ : Tuple = ["longest", "max_length", "do_not_pad"]
lowercase__ : str = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase , lowerCamelCase ):
lowercase__ : List[str] = feat_extract(lowerCamelCase , max_length=lowerCamelCase , padding=lowerCamelCase )
lowercase__ : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : Tuple = feat_extract(
lowerCamelCase , truncation=lowerCamelCase , max_length=1000 , padding="max_length" , return_tensors="np" )
lowercase__ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : Tuple = feat_extract(
lowerCamelCase , truncation=lowerCamelCase , max_length=1000 , padding="longest" , return_tensors="np" )
lowercase__ : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
lowercase__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : Union[str, Any] = feat_extract(
lowerCamelCase , truncation=lowerCamelCase , max_length=2000 , padding="longest" , return_tensors="np" )
lowercase__ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : Tuple = np.random.rand(100 ).astype(np.floataa )
lowercase__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowercase__ : Tuple = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowercase__ : Dict = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __a ( self ) -> str:
"""simple docstring"""
lowercase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase__ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : List[str] = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test feature size
lowercase__ : str = feature_extractor(audio_target=lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowercase__ : Union[str, Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
lowercase__ : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test batched
lowercase__ : Dict = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values
lowercase__ : List[str] = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowercase__ : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase__ : Optional[Any] = np.asarray(lowerCamelCase )
lowercase__ : List[Any] = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values
lowercase__ : List[str] = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def __a ( self ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
lowercase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ : Dict = feat_extract.model_input_names[0]
lowercase__ : int = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCamelCase ) == len(lowerCamelCase ) for x, y in zip(lowerCamelCase , processed_features[input_name] ) ) )
lowercase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase )
lowercase__ : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
lowercase__ : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase__ : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Tuple:
"""simple docstring"""
lowercase__ : Dict = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase )
lowercase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ : Optional[Any] = feat_extract.model_input_names[0]
lowercase__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
lowercase__ : List[str] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase__ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Tuple:
"""simple docstring"""
lowercase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
lowercase__ : Optional[Any] = feat_extract.model_input_names[0]
lowercase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
lowercase__ : Optional[int] = feat_extract.num_mel_bins # hack!
lowercase__ : Optional[int] = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
lowercase__ : Optional[int] = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __a ( self ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.feat_extract_dict
lowercase__ : int = True
lowercase__ : Optional[Any] = self.feature_extraction_class(**lowerCamelCase )
lowercase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
lowercase__ : Union[str, Any] = [len(lowerCamelCase ) for x in speech_inputs]
lowercase__ : Any = feat_extract.model_input_names[0]
lowercase__ : Optional[int] = BatchFeature({input_name: speech_inputs} )
lowercase__ : int = feat_extract.num_mel_bins # hack!
lowercase__ : int = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCamelCase )
def __a ( self ) -> Dict:
"""simple docstring"""
lowercase__ : List[Any] = self.feat_extract_dict
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = self.feature_extraction_class(**lowerCamelCase )
lowercase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
lowercase__ : List[str] = [len(lowerCamelCase ) for x in speech_inputs]
lowercase__ : Any = feat_extract.model_input_names[0]
lowercase__ : Dict = BatchFeature({input_name: speech_inputs} )
lowercase__ : int = min(lowerCamelCase )
lowercase__ : List[str] = feat_extract.num_mel_bins # hack!
lowercase__ : Dict = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __a ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
from datasets import load_dataset
lowercase__ : Any = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
lowercase__ : int = ds.sort("id" ).select(range(lowerCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __a ( self ) -> List[str]:
"""simple docstring"""
lowercase__ : List[str] = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
lowercase__ : List[Any] = self._load_datasamples(1 )
lowercase__ : int = SpeechTaFeatureExtractor()
lowercase__ : Tuple = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCamelCase , atol=1E-6 ) )
def __a ( self ) -> int:
"""simple docstring"""
lowercase__ : Optional[int] = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
lowercase__ : Any = self._load_datasamples(1 )
lowercase__ : List[Any] = SpeechTaFeatureExtractor()
lowercase__ : int = feature_extractor(audio_target=lowerCamelCase , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
| 397 | 0 |
"""simple docstring"""
def snake_case ( _a: Optional[int] , _a: Any , _a: List[Any] , _a: int )-> Optional[int]:
'''simple docstring'''
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
lowerCamelCase__ = mf_knapsack(i - 1 , _a , _a , _a )
else:
lowerCamelCase__ = max(
mf_knapsack(i - 1 , _a , _a , _a ) , mf_knapsack(i - 1 , _a , _a , j - wt[i - 1] ) + val[i - 1] , )
lowerCamelCase__ = val
return f[i][j]
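# Memoised recurrence: f[i][j] = f[i - 1][j] when item i does not fit
# (j < wt[i - 1]), else max(f[i - 1][j], f[i - 1][j - wt[i - 1]] + val[i - 1]).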
def snake_case ( _a: str , _a: List[str] , _a: int , _a: Tuple )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
lowerCamelCase__ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
lowerCamelCase__ = dp[i - 1][w_]
return dp[n][w_], dp
def snake_case ( _a: int , _a: list , _a: list )-> str:
'''simple docstring'''
if not (isinstance(_a , (list, tuple) ) and isinstance(_a , (list, tuple) )):
raise ValueError(
'Both the weights and values vectors must be either lists or tuples' )
lowerCamelCase__ = len(_a )
if num_items != len(_a ):
lowerCamelCase__ = (
'The number of weights must be the same as the number of values.\n'
F'But got {num_items} weights and {len(_a )} values'
)
raise ValueError(_a )
for i in range(_a ):
if not isinstance(wt[i] , _a ):
lowerCamelCase__ = (
'All weights must be integers but got weight of '
F'type {type(wt[i] )} at index {i}'
)
raise TypeError(_a )
lowerCamelCase__ , lowerCamelCase__ = knapsack(_a , _a , _a , _a )
lowerCamelCase__ = set()
_construct_solution(_a , _a , _a , _a , _a )
return optimal_val, example_optional_set
def snake_case ( _a: list , _a: list , _a: int , _a: int , _a: set )-> str:
'''simple docstring'''
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(_a , _a , i - 1 , _a , _a )
else:
optimal_set.add(_a )
_construct_solution(_a , _a , i - 1 , j - wt[i - 1] , _a )
if __name__ == "__main__":
_snake_case = [3, 2, 4, 4]
_snake_case = [4, 3, 2, 3]
_snake_case = 4
_snake_case = 6
_snake_case = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
_snake_case , _snake_case = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
_snake_case , _snake_case = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 659 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def snake_case ( _a: List[str] )-> Optional[MinHash]:
'''simple docstring'''
if len(_a ) < MIN_NUM_TOKENS:
return None
lowerCamelCase__ = MinHash(num_perm=_a )
for token in set(_a ):
min_hash.update(token.encode() )
return min_hash
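# A MinHash sketch lets the Jaccard similarity of two token sets be estimated
# without a pairwise comparison; NUM_PERM permutations trade sketch size for
# estimation accuracy.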
def snake_case ( _a: str )-> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(_a ) if len(t.strip() ) > 0}
class _a :
def __init__( self : List[Any] , *,
SCREAMING_SNAKE_CASE__ : float = 0.85 , ):
lowerCamelCase__ = duplication_jaccard_threshold
lowerCamelCase__ = NUM_PERM
lowerCamelCase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowerCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : MinHash ):
lowerCamelCase__ = self._index.query(SCREAMING_SNAKE_CASE__ )
if code_key in self._index.keys:
print(F'Duplicate key {code_key}' )
return
self._index.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(SCREAMING_SNAKE_CASE__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(SCREAMING_SNAKE_CASE__ )
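# Clustering rule: a new document joins the cluster of its first
# already-clustered near-duplicate; otherwise it is attached to a cluster
# keyed by its first close match from the LSH index.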
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = []
for base, duplicates in self._duplicate_clusters.items():
lowerCamelCase__ = [base] + list(SCREAMING_SNAKE_CASE__ )
# reformat the cluster to be a list of dict
lowerCamelCase__ = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(SCREAMING_SNAKE_CASE__ )
return duplicate_clusters
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.get_duplicate_clusters()
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = element
lowerCamelCase__ = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def snake_case ( _a: Type[Dataset] )-> Tuple:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_a , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def snake_case ( _a: Type[Dataset] , _a: float )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = DuplicationIndex(duplication_jaccard_threshold=_a )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_a ) ) , max_queue_size=100 ) ):
di.add(_a , _a )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def snake_case ( _a: str , _a: str )-> float:
'''simple docstring'''
lowerCamelCase__ = get_tokens(_a )
lowerCamelCase__ = get_tokens(_a )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
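# Exact Jaccard similarity (intersection size over union size of the two token
# sets), used to verify candidates flagged by the approximate MinHash index.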
_snake_case = None
def snake_case ( _a: Dict , _a: Union[str, Any] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = []
for elementa in cluster:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(_a , _a ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowerCamelCase__ = 1
extremes.append(_a )
return extremes
def snake_case ( _a: Any , _a: Tuple , _a: Dict )-> Union[str, Any]:
'''simple docstring'''
global _shared_dataset
lowerCamelCase__ = dataset
lowerCamelCase__ = []
lowerCamelCase__ = partial(_find_cluster_extremes_shared , jaccard_threshold=_a )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_a , _a , ) , total=len(_a ) , ):
extremes_list.append(_a )
return extremes_list
def snake_case ( _a: Type[Dataset] , _a: float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowerCamelCase__ = make_duplicate_clusters(_a , _a )
lowerCamelCase__ = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
lowerCamelCase__ = {}
lowerCamelCase__ = find_extremes(_a , _a , _a )
for extremes in extremes_clusters:
for element in extremes:
lowerCamelCase__ = element
lowerCamelCase__ = duplicate_indices - set(extreme_dict.keys() )
lowerCamelCase__ = dataset.filter(lambda _a , _a : idx not in remove_indices , with_indices=_a )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCamelCase__ = element['base_index'] in extreme_dict
if element["is_extreme"]:
lowerCamelCase__ = extreme_dict[element['base_index']]['copies']
print(F'Original dataset size: {len(_a )}' )
print(F'Number of duplicate clusters: {len(_a )}' )
print(F'Files in duplicate cluster: {len(_a )}' )
print(F'Unique files in duplicate cluster: {len(_a )}' )
print(F'Filtered dataset size: {len(_a )}' )
return ds_filter, duplicate_clusters
| 659 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCAmelCase__ = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase ):
"""simple docstring"""
a : Dict =[
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **snake_case__ ):
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowerCAmelCase : int = deprecated_arg[3:]
lowerCAmelCase : List[str] = not kwargs.pop(snake_case__ )
logger.warning(
f"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
lowerCAmelCase : str = kwargs.pop("tpu_name" , self.tpu_name )
lowerCAmelCase : Union[str, Any] = kwargs.pop("device_idx" , self.device_idx )
lowerCAmelCase : List[str] = kwargs.pop("eager_mode" , self.eager_mode )
lowerCAmelCase : Tuple = kwargs.pop("use_xla" , self.use_xla )
super().__init__(**snake_case__ )
a : str =field(
default=__lowerCamelCase , metadata={"help": "Name of TPU"} , )
a : int =field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
a : bool =field(default=__lowerCamelCase , metadata={"help": "Benchmark models in eager mode."} )
a : bool =field(
default=__lowerCamelCase , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
} , )
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
lowerCAmelCase : List[str] = None
if self.tpu:
try:
if self.tpu_name:
lowerCAmelCase : str = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowerCAmelCase : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowerCAmelCase : List[Any] = None
return tpu
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowerCAmelCase : int = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
lowerCAmelCase : Tuple = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , "GPU" ) # disable GPU
lowerCAmelCase : Optional[Any] = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
return strategy
@property
def lowercase__ ( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
def lowercase__ ( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
def lowercase__ ( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def lowercase__ ( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.n_gpu > 0
| 645 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[str] = ["""image_processor""", """tokenizer"""]
_UpperCamelCase : Any = """OwlViTImageProcessor"""
_UpperCamelCase : Dict = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , snake_case=None , snake_case=None , **snake_case ):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case , )
lowercase = kwargs.pop('feature_extractor' )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case , snake_case )
def __call__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="max_length" , snake_case="np" , **snake_case ):
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(snake_case , snake_case ) or (isinstance(snake_case , snake_case ) and not isinstance(text[0] , snake_case )):
lowercase = [self.tokenizer(snake_case , padding=snake_case , return_tensors=snake_case , **snake_case )]
elif isinstance(snake_case , snake_case ) and isinstance(text[0] , snake_case ):
lowercase = []
# Maximum number of queries across batch
lowercase = max([len(snake_case ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(snake_case ) != max_num_queries:
lowercase = t + [' '] * (max_num_queries - len(snake_case ))
lowercase = self.tokenizer(snake_case , padding=snake_case , return_tensors=snake_case , **snake_case )
encodings.append(snake_case )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
lowercase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowercase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowercase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
lowercase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowercase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
lowercase = BatchEncoding()
lowercase = input_ids
lowercase = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
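    # The helpers below simply forward to the wrapped image processor / tokenizer.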
    def post_process( self , *args , **kwargs ):
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
| 84 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase__ ( Trainer ):
"""simple docstring"""
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix = "eval" ):
        '''simple docstring'''
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" ):
        '''simple docstring'''
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , "predict" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
| 185 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__UpperCamelCase = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ) -> list:
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
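# e.g. floats_list((2, 3)) -> a 2x3 nested list of random floats in [0, scale)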
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        '''simple docstring'''
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        '''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding="max_length" , return_tensors="np" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    def test_double_precision_pad( self ):
        '''simple docstring'''
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        '''simple docstring'''
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1E-4 ) )
    def test_zero_mean_unit_var_normalization( self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1E-3 ) )
| 185 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
snake_case__ : Tuple = set()
# Replace all the whitespace in our sentence
snake_case__ : List[Any] = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(__magic_name__ ) == 26
def UpperCamelCase__ ( __magic_name__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
snake_case__ : Optional[Any] = [False] * 26
for char in input_str:
if char.islower():
snake_case__ : int = True
elif char.isupper():
snake_case__ : Optional[Any] = True
return all(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
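# The three variants trade clarity for speed: a character set built in a loop, a
# 26-slot boolean table indexed by ord(), and a single set comprehension.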
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit
    setup = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""" , setup=setup ) )
    print(timeit("""is_pangram_faster()""" , setup=setup ) )
    print(timeit("""is_pangram_fastest()""" , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 38 |
'''simple docstring'''
from PIL import Image
def change_brightness( img: Image , level: float ):
    def brightness(c: int ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
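# brightness() shifts every channel by `level`: 128 + level + (c - 128) reduces to
# c + level, keeping the midpoint-relative form of the transform explicit.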
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 1_00)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 50 | 0 |
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
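# Each test case below exercises one UNet building block: prepare a dummy input,
# run the block, and compare an output slice against hard-coded expected values
# (see UNetBlockTesterMixin for the shared setup and assertions).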
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DownBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : Optional[int] = '''down'''
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Tuple = ResnetDownsampleBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : Tuple = '''down'''
def a_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = AttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : Tuple = '''down'''
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : str = CrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''down'''
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase : Dict = 32
return init_dict, inputs_dict
def a_ ( self : int ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Dict = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Any = SimpleCrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : Tuple = '''down'''
@property
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=snake_case__ )
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : int = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase : Tuple = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def a_ ( self : Dict ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = SkipDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : str = '''down'''
@property
def a_ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=snake_case__ )
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase : Tuple = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = AttnSkipDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''down'''
@property
def a_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=snake_case__ )
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Dict = DownEncoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : str = '''down'''
@property
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
return super().get_dummy_input(include_temb=snake_case__ )
def a_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = {
'''in_channels''': 32,
'''out_channels''': 32,
}
_UpperCAmelCase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase : str = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = AttnDownEncoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : List[str] = '''down'''
@property
def a_ ( self : Any ) -> Tuple:
'''simple docstring'''
return super().get_dummy_input(include_temb=snake_case__ )
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase : List[str] = {
'''in_channels''': 32,
'''out_channels''': 32,
}
_UpperCAmelCase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Any = UNetMidBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : str = '''mid'''
def a_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
_UpperCAmelCase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def a_ ( self : int ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Any = UNetMidBlockaDCrossAttn # noqa F405
SCREAMING_SNAKE_CASE_ : int = '''mid'''
def a_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase : Dict = 32
return init_dict, inputs_dict
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Dict = UNetMidBlockaDSimpleCrossAttn # noqa F405
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''mid'''
@property
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=snake_case__ )
def a_ ( self : Dict ) -> str:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Dict = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase : Optional[int] = 32
return init_dict, inputs_dict
def a_ ( self : str ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : int = UpBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : List[str] = '''up'''
@property
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ )
def a_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ResnetUpsampleBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : Tuple = '''up'''
@property
def a_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ )
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : int = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Tuple = CrossAttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : List[str] = '''up'''
@property
def a_ ( self : int ) -> Dict:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ )
def a_ ( self : Any ) -> int:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase : Optional[int] = 32
return init_dict, inputs_dict
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = SimpleCrossAttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : List[Any] = '''up'''
@property
def a_ ( self : List[str] ) -> int:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ , include_encoder_hidden_states=snake_case__ )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase : Optional[int] = 32
return init_dict, inputs_dict
def a_ ( self : Optional[int] ) -> int:
'''simple docstring'''
_UpperCAmelCase : List[Any] = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = AttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''up'''
@property
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Any = SkipUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : List[str] = '''up'''
@property
def a_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ )
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = AttnSkipUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''up'''
@property
def a_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=snake_case__ )
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = UpDecoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : Optional[int] = '''up'''
@property
def a_ ( self : Any ) -> Any:
'''simple docstring'''
return super().get_dummy_input(include_temb=snake_case__ )
def a_ ( self : Dict ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = {'''in_channels''': 32, '''out_channels''': 32}
_UpperCAmelCase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
super().test_output(snake_case__ )
class lowerCAmelCase_ ( __a , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = AttnUpDecoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE_ : str = '''up'''
@property
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return super().get_dummy_input(include_temb=snake_case__ )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = {'''in_channels''': 32, '''out_channels''': 32}
_UpperCAmelCase : List[Any] = self.dummy_input
return init_dict, inputs_dict
def a_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
super().test_output(snake_case__ )
| 706 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer , qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
        sas_model.load_state_dict(save_dict['model'] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer , sas_model = make_qa_s2s_model(
            model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps )  # TODO fix for larger GPU
    else:
        wiki40b_passages , wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    eli5 = datasets.load_dataset('eli5' , name='LFQA_reddit' )
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages , wiki40b_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
eli5_train , eli5_train_q_index = load_train_data()
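# Indexes, models and training data are loaded once and memoized by st.cache,
# so Streamlit reruns of this script do not reload them.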
def find_nearest_training( question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D , I = eval_loop_scores , nn_ids = None , None  # noqa: E702  (placeholder removed below)
def _A ( _UpperCamelCase , _UpperCamelCase="wiki40b" , _UpperCamelCase="dense" , _UpperCamelCase=10 ):
if source == "none":
_UpperCAmelCase , _UpperCAmelCase : Tuple = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCAmelCase , _UpperCAmelCase : Dict = query_qa_dense_index(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = query_es_index(
_UpperCamelCase , _UpperCamelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=_UpperCamelCase , )
_UpperCAmelCase : List[Any] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCAmelCase : Union[str, Any] = '''question: {} context: {}'''.format(_UpperCamelCase , _UpperCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCamelCase : None),
} )
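# hash_funcs maps unhashable/expensive argument types (tensors, tokenizers) to a
# constant, so st.cache keys the memoized call on the remaining cheap arguments.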
def answer_question( question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1_024 , device='cuda:0' , )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
UpperCAmelCase__ : Tuple = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
UpperCAmelCase__ : Dict = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ : Optional[Any] = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase__ : Optional[Any] = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
UpperCAmelCase__ : List[Any] = st.sidebar.checkbox('Demo options')
if demo_options:
UpperCAmelCase__ : int = st.sidebar.selectbox(
'',
action_list,
index=3,
)
UpperCAmelCase__ : List[str] = action_list.index(action_st)
UpperCAmelCase__ : Optional[int] = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
UpperCAmelCase__ : Union[str, Any] = show_type == 'Show full text of passages'
else:
UpperCAmelCase__ : int = 3
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Any = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
UpperCAmelCase__ : str = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ : List[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
UpperCAmelCase__ : int = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
UpperCAmelCase__ : Optional[Any] = 'wiki40b'
UpperCAmelCase__ : str = 'dense'
UpperCAmelCase__ : Optional[Any] = 'beam'
UpperCAmelCase__ : Optional[int] = 2
UpperCAmelCase__ : Tuple = 64
UpperCAmelCase__ : Any = 256
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Dict = st.sidebar.checkbox('Generation options')
if generate_options:
UpperCAmelCase__ : Dict = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
UpperCAmelCase__ : Optional[int] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
UpperCAmelCase__ : List[str] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ : List[str] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ : Dict = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ : Any = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Any = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Union[str, Any] = None
# start main text
UpperCAmelCase__ : Tuple = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
UpperCAmelCase__ : Dict = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase__ : List[str] = st.text_input('Enter your question here:', '')
else:
UpperCAmelCase__ : Any = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = make_support(question, source=wiki_source, method='dense', n_results=10)
UpperCAmelCase__ , UpperCAmelCase__ : Dict = make_support(question, source=wiki_source, method='sparse', n_results=10)
UpperCAmelCase__ : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ : Optional[int] = support_list[:10]
UpperCAmelCase__ : Union[str, Any] = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
UpperCAmelCase__ : Optional[int] = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
UpperCAmelCase__ : Optional[Any] = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ : List[Any] = '[{}]({})'.format(res[0], wiki_url)
else:
UpperCAmelCase__ : List[Any] = sec_titles.split(' & ')
UpperCAmelCase__ : Union[str, Any] = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase__ : str = find_nearest_training(question)
UpperCAmelCase__ : Union[str, Any] = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
UpperCAmelCase__ : List[str] = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
UpperCAmelCase__ : List[str] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 416 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN :
    def __init__( self , conva_get , size_pa , bp_numa , bp_numb , bp_numc , rate_w=0.2 , rate_t=0.2 ):
        self.num_bpa = bp_numa
        self.num_bpb = bp_numb
        self.num_bpc = bp_numc
        self.conva = conva_get[:2]
        self.step_conva = conva_get[2]
        self.size_poolinga = size_pa
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conva = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bpc , self.num_bpb ) + 0.5 )
        self.vji = np.mat(-1 * np.random.rand(self.num_bpb , self.num_bpa ) + 0.5 )
        self.thre_conva = -2 * np.random.rand(self.conva[1] ) + 1
        self.thre_bpa = -2 * np.random.rand(self.num_bpb ) + 1
        self.thre_bpb = -2 * np.random.rand(self.num_bpc ) + 1
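    # Layout: one convolution layer (w_conva / thre_conva) feeding average pooling,
    # then a two-layer fully connected back-propagation classifier (vji, then wkj).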
    def save_model( self , save_path ):
        # save model dict with pickle
        model_dic = {
            """num_bp1""": self.num_bpa,
            """num_bp2""": self.num_bpb,
            """num_bp3""": self.num_bpc,
            """conv1""": self.conva,
            """step_conv1""": self.step_conva,
            """size_pooling1""": self.size_poolinga,
            """rate_weight""": self.rate_weight,
            """rate_thre""": self.rate_thre,
            """w_conv1""": self.w_conva,
            """wkj""": self.wkj,
            """vji""": self.vji,
            """thre_conv1""": self.thre_conva,
            """thre_bp2""": self.thre_bpa,
            """thre_bp3""": self.thre_bpb,
        }
        with open(save_path , """wb""" ) as f:
            pickle.dump(model_dic , f )
        print(F'''Model saved: {save_path}''' )
    @classmethod
    def ReadModel( cls , model_path ):
        # read saved model
        with open(model_path , """rb""" ) as f:
            model_dic = pickle.load(f )  # noqa: S301
        conv_get = model_dic.get("""conv1""" )
        conv_get.append(model_dic.get("""step_conv1""" ) )
        size_pa = model_dic.get("""size_pooling1""" )
        bp_numa = model_dic.get("""num_bp1""" )
        bp_numb = model_dic.get("""num_bp2""" )
        bp_numc = model_dic.get("""num_bp3""" )
        rate_w = model_dic.get("""rate_weight""" )
        rate_t = model_dic.get("""rate_thre""" )
        # create model instance
        conv_ins = CNN(conv_get , size_pa , bp_numa , bp_numb , bp_numc , rate_w , rate_t )
        # modify model parameter
        conv_ins.w_conva = model_dic.get("""w_conv1""" )
        conv_ins.wkj = model_dic.get("""wkj""" )
        conv_ins.vji = model_dic.get("""vji""" )
        conv_ins.thre_conva = model_dic.get("""thre_conv1""" )
        conv_ins.thre_bpa = model_dic.get("""thre_bp2""" )
        conv_ins.thre_bpb = model_dic.get("""thre_bp3""" )
        return conv_ins
    def sig( self , x ):
        return 1 / (1 + np.exp(-1 * x ))
    def do_round( self , x ):
        return round(x , 3 )
    def convolute( self , data , convs , w_convs , thre_convs , conv_step ):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data )[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0 , size_data - size_conv + 1 , conv_step ):
            for j_focus in range(0 , size_data - size_conv + 1 , conv_step ):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus )
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(num_conv ):
            featuremap = []
            for i_focus in range(len(data_focus ) ):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus ) )
            featuremap = np.asmatrix(featuremap ).reshape(
                size_feature_map , size_feature_map )
            data_featuremap.append(featuremap )
        # expanding the data slice to one dimension
        focusa_list = []
        for each_focus in data_focus:
            focusa_list.extend(self._expand_mat(each_focus ) )
        focus_list = np.asarray(focusa_list )
        return focus_list, data_featuremap
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__="average_pool" ):
# pooling process
lowerCamelCase : Optional[int] = len(featuremaps[0] )
lowerCamelCase : str = int(size_map / size_pooling )
lowerCamelCase : Optional[int] = []
for i_map in range(len(__magic_name__ ) ):
lowerCamelCase : Union[str, Any] = featuremaps[i_map]
lowerCamelCase : List[str] = []
for i_focus in range(0 , __magic_name__ , __magic_name__ ):
for j_focus in range(0 , __magic_name__ , __magic_name__ ):
lowerCamelCase : Tuple = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__magic_name__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__magic_name__ ) )
lowerCamelCase : Tuple = np.asmatrix(__magic_name__ ).reshape(__magic_name__ , __magic_name__ )
featuremap_pooled.append(__magic_name__ )
return featuremap_pooled
    def _expand( self , data ):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data ) ):
            shapes = np.shape(data[i] )
            data_listed = data[i].reshape(1 , shapes[0] * shapes[1] )
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed )
        data_expanded = np.asarray(data_expanded )
        return data_expanded
    def _expand_mat( self , data_mat ):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat )
        shapes = np.shape(data_mat )
        data_expanded = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded
    def _calculate_gradient_from_pool( self , out_map , pd_pool , num_map , size_map , size_pooling ):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map ):
            pd_conv = np.ones((size_map, size_map) )
            for i in range(0 , size_map , size_pooling ):
                for j in range(0 , size_map , size_pooling ):
                    pd_conv[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv = np.multiply(
                pd_conv , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(pd_conv )
        return pd_all
    def train( self , patterns , datas_train , datas_teach , n_repeat , error_accuracy , draw_e=bool ):
        # model training
        print("""----------------------Start Training-------------------------""" )
        print((""" - - Shape: Train_Data """, np.shape(datas_train )) )
        print((""" - - Shape: Teach_Data """, np.shape(datas_teach )) )
        rp = 0
        all_mse = []
        mse = 1_0_0_0_0
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(F'''-------------Learning Time {rp}--------------''' )
            for p in range(len(datas_train ) ):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p] )
                data_teach = np.asarray(datas_teach[p] )
                data_focusa , data_conveda = self.convolute(
                    data_train , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                data_pooleda = self.pooling(data_conveda , self.size_poolinga )
                shape_featuremapa = np.shape(data_conveda )
                data_bp_input = self._expand(data_pooleda )
                bp_outa = data_bp_input
                bp_netb = np.dot(bp_outa , self.vji.T ) - self.thre_bpa
                bp_outb = self.sig(bp_netb )
                bp_netc = np.dot(bp_outb , self.wkj.T ) - self.thre_bpb
                bp_outc = self.sig(bp_netc )
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_outc) , np.multiply(bp_outc , (1 - bp_outc) ) )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all , self.wkj ) , np.multiply(bp_outb , (1 - bp_outb) ) )
                pd_i_all = np.dot(pd_j_all , self.vji )
                pd_conva_pooled = pd_i_all / (self.size_poolinga * self.size_poolinga)
                pd_conva_pooled = pd_conva_pooled.T.getA().tolist()
                pd_conva_all = self._calculate_gradient_from_pool(
                    data_conveda , pd_conva_pooled , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    pd_conv_list = self._expand_mat(pd_conva_all[k_conv] )
                    delta_w = self.rate_weight * np.dot(pd_conv_list , data_focusa )
                    self.w_conva[k_conv] = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    self.thre_conva[k_conv] = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_outb * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                self.thre_bpb = self.thre_bpb - pd_k_all * self.rate_thre
                self.thre_bpa = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_outc ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse )
        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(all_mse , """+-""" )
            plt.plot(yplot , """r--""" )
            plt.xlabel("""Learning Times""" )
            plt.ylabel("""All_mse""" )
            plt.grid(True , alpha=0.5 )
            plt.show()
        print("""------------------Training Completed---------------------""" )
        print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse
    def predict( self , datas_test ):
        # model predict
        produce_out = []
        print("""-------------------Start Testing-------------------------""" )
        print((""" - - Shape: Test_Data """, np.shape(datas_test )) )
        for p in range(len(datas_test ) ):
            data_test = np.asmatrix(datas_test[p] )
            data_focusa , data_conveda = self.convolute(
                data_test , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            data_pooleda = self.pooling(data_conveda , self.size_poolinga )
            data_bp_input = self._expand(data_pooleda )
            bp_outa = data_bp_input
            bp_netb = bp_outa * self.vji.T - self.thre_bpa
            bp_outb = self.sig(bp_netb )
            bp_netc = bp_outb * self.wkj.T - self.thre_bpb
            bp_outc = self.sig(bp_netc )
            produce_out.extend(bp_outc.getA().tolist() )
        res = [list(map(self.do_round , each ) ) for each in produce_out]
        return np.asarray(res )
    def convolution( self , data ):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data )
        data_focusa , data_conveda = self.convolute(
            data_test , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        data_pooleda = self.pooling(data_conveda , self.size_poolinga )
        return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 681 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload, sampling_rate ):
    ar = F'''{sampling_rate}'''
    ac = """1"""
    format_for_conversion = """f32le"""
    ffmpeg_command = [
        """ffmpeg""",
        """-i""",
        """pipe:0""",
        """-ac""",
        ac,
        """-ar""",
        ar,
        """-f""",
        format_for_conversion,
        """-hide_banner""",
        """-loglevel""",
        """quiet""",
        """pipe:1""",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32 )
    if audio.shape[0] == 0:
        raise ValueError("""Malformed soundfile""" )
    return audio
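# Example: audio = ffmpeg_read(open("sample.flac", "rb").read(), sampling_rate=16_000)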
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ):
lowerCamelCase : Dict = F'''{sampling_rate}'''
lowerCamelCase : List[Any] = """1"""
if format_for_conversion == "s16le":
lowerCamelCase : Any = 2
elif format_for_conversion == "f32le":
lowerCamelCase : Dict = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase : Dict = platform.system()
if system == "Linux":
lowerCamelCase : Union[str, Any] = """alsa"""
lowerCamelCase : List[Any] = """default"""
elif system == "Darwin":
lowerCamelCase : List[Any] = """avfoundation"""
lowerCamelCase : List[Any] = """:0"""
elif system == "Windows":
lowerCamelCase : int = """dshow"""
lowerCamelCase : Any = """default"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase )
for item in iterator:
yield item
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ):
if stream_chunk_s is not None:
lowerCamelCase : int = stream_chunk_s
else:
lowerCamelCase : Dict = chunk_length_s
lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase )
if format_for_conversion == "s16le":
lowerCamelCase : Optional[int] = np.intaa
lowerCamelCase : Optional[Any] = 2
elif format_for_conversion == "f32le":
lowerCamelCase : int = np.floataa
lowerCamelCase : Any = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
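# default stride: overlap consecutive chunks by one sixth of the chunk length on each side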
if stride_length_s is None:
lowerCamelCase : Any = chunk_length_s / 6
lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCamelCase, (int, float) ):
lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s]
lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCamelCase : List[Any] = datetime.datetime.now()
lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase )
for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ):
# Put everything back in numpy scale
lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase )
lowerCamelCase : List[Any] = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
lowerCamelCase : Tuple = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# we are running behind real time, so skip this chunk
continue
yield item
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ):
lowerCamelCase : Optional[int] = B""""""
lowerCamelCase , lowerCamelCase : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowerCamelCase : str = 0
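# the very first chunk has no left stride; later chunks reuse stride_left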
for raw in iterator:
acc += raw
if stream and len(lowerCamelCase ) < chunk_len:
lowerCamelCase : Optional[int] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCamelCase : str = (_stride_left, stride_right)
lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
lowerCamelCase : Optional[int] = False
yield item
lowerCamelCase : str = stride_left
lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCamelCase ) > stride_left:
lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
lowerCamelCase : List[Any] = False
yield item
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Optional[int] = 2**24 # 16MB
try:
with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process:
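# read fixed-size chunks from ffmpeg's stdout until EOF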
while True:
lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 681 | 1 |
from __future__ import annotations
def __lowerCAmelCase ( A , A ):
if nth_term == "":
return [""]
UpperCAmelCase_ = int(A )
UpperCAmelCase_ = int(A )
UpperCAmelCase_ = []
for temp in range(int(A ) ):
series.append(F"1 / {pow(temp + 1 , int(A ) )}" if series else "1" )
return series
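# example: calling with nth_term=5, power=2 yields ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']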
if __name__ == "__main__":
import doctest
doctest.testmod()
_a: Optional[Any] = int(input("""Enter the last number (nth term) of the P-Series"""))
_a: Any = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power)) | 268 |
from __future__ import annotations
from collections import Counter
from random import random
class __UpperCamelCase :
def __init__( self : Any ):
'''simple docstring'''
UpperCAmelCase_ = {}
def __A ( self : List[str] , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = {}
def __A ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : float ):
'''simple docstring'''
if nodea not in self.connections:
self.add_node(lowerCAmelCase )
if nodea not in self.connections:
self.add_node(lowerCAmelCase )
UpperCAmelCase_ = probability
def __A ( self : Tuple ):
'''simple docstring'''
return list(self.connections )
def __A ( self : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = 0
UpperCAmelCase_ = random()
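# sample the next node: accumulate edge probabilities until the running sum exceeds the random draw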
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __lowerCAmelCase ( A , A , A ):
UpperCAmelCase_ = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(A , A , A )
UpperCAmelCase_ = Counter(graph.get_nodes() )
UpperCAmelCase_ = start
for _ in range(A ):
UpperCAmelCase_ = graph.transition(A )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod() | 268 | 1 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = "linear"
__UpperCamelCase: Tuple = "cosine"
__UpperCamelCase: Optional[int] = "cosine_with_restarts"
__UpperCamelCase: str = "polynomial"
__UpperCamelCase: int = "constant"
__UpperCamelCase: Any = "constant_with_warmup"
__UpperCamelCase: Optional[Any] = "piecewise_constant"
def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int = -1 ) -> Any:
"""simple docstring"""
return LambdaLR(_UpperCAmelCase , lambda _UpperCAmelCase : 1 , last_epoch=_UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int , _UpperCAmelCase : int = -1 ) -> Optional[int]:
"""simple docstring"""
def lr_lambda(_UpperCAmelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCAmelCase ) / float(max(1.0 , _UpperCAmelCase ) )
return 1.0
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , last_epoch=_UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : str , _UpperCAmelCase : int = -1 ) -> str:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = {}
_UpperCAmelCase : Union[str, Any] = step_rules.split("," )
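# step_rules like "1:2,3:8,0.5" mean: multiplier 2 before step 1, 8 before step 3, then 0.5 for all later steps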
for rule_str in rule_list[:-1]:
_UpperCAmelCase , _UpperCAmelCase : Tuple = rule_str.split(":" )
_UpperCAmelCase : Dict = int(_UpperCAmelCase )
_UpperCAmelCase : int = float(_UpperCAmelCase )
_UpperCAmelCase : Dict = value
_UpperCAmelCase : List[str] = float(rule_list[-1] )
def create_rules_function(_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ):
def rule_func(_UpperCAmelCase : int ) -> float:
_UpperCAmelCase : Union[str, Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_UpperCAmelCase : Optional[Any] = create_rules_function(_UpperCAmelCase , _UpperCAmelCase )
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , last_epoch=_UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]=-1 ) -> Optional[int]:
"""simple docstring"""
def lr_lambda(_UpperCAmelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : float = 0.5 , _UpperCAmelCase : int = -1 ) -> Any:
"""simple docstring"""
def lr_lambda(_UpperCAmelCase : List[Any] ):
if current_step < num_warmup_steps:
return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) )
_UpperCAmelCase : Union[str, Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
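# cosine decay from 1 to 0 after warmup; num_cycles=0.5 gives a single half-cosine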
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = -1 ) -> int:
"""simple docstring"""
def lr_lambda(_UpperCAmelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) )
_UpperCAmelCase : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]=1e-7 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : List[Any]=-1 ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Any = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCAmelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
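# polynomial decay: lr = (lr_init - lr_end) * (1 - t/T)**power + lr_end, returned as a multiple of lr_init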
_UpperCAmelCase : int = lr_init - lr_end
_UpperCAmelCase : Optional[Any] = num_training_steps - num_warmup_steps
_UpperCAmelCase : int = 1 - (current_step - num_warmup_steps) / decay_steps
_UpperCAmelCase : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__SCREAMING_SNAKE_CASE : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def UpperCamelCase_ ( _UpperCAmelCase : Union[str, SchedulerType] , _UpperCAmelCase : Optimizer , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 1 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : int = -1 , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = SchedulerType(_UpperCAmelCase )
_UpperCAmelCase : str = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCAmelCase , last_epoch=_UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCAmelCase , step_rules=_UpperCAmelCase , last_epoch=_UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCAmelCase , num_warmup_steps=_UpperCAmelCase , last_epoch=_UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCAmelCase , num_warmup_steps=_UpperCAmelCase , num_training_steps=_UpperCAmelCase , num_cycles=_UpperCAmelCase , last_epoch=_UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCAmelCase , num_warmup_steps=_UpperCAmelCase , num_training_steps=_UpperCAmelCase , power=_UpperCAmelCase , last_epoch=_UpperCAmelCase , )
return schedule_func(
_UpperCAmelCase , num_warmup_steps=_UpperCAmelCase , num_training_steps=_UpperCAmelCase , last_epoch=_UpperCAmelCase )
| 244 | '''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE : List[str] = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def UpperCamelCase_ ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=None ) -> Any:
"""simple docstring"""
require_version(deps[pkg] , _UpperCAmelCase )
| 244 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , ):
UpperCAmelCase__ : Any = parent
UpperCAmelCase__ : Dict = batch_size
UpperCAmelCase__ : int = seq_length
UpperCAmelCase__ : Optional[Any] = is_training
UpperCAmelCase__ : Optional[int] = use_attention_mask
UpperCAmelCase__ : List[Any] = use_token_type_ids
UpperCAmelCase__ : int = use_labels
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : Any = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Dict = attention_probs_dropout_prob
UpperCAmelCase__ : int = max_position_embeddings
UpperCAmelCase__ : int = type_vocab_size
UpperCAmelCase__ : Any = type_sequence_label_size
UpperCAmelCase__ : str = initializer_range
UpperCAmelCase__ : Dict = num_choices
def lowerCamelCase ( self ):
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : int = None
if self.use_attention_mask:
UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : List[str] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase ( self ):
UpperCAmelCase__ : str = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = config_and_inputs
UpperCAmelCase__ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase ( self ):
UpperCAmelCase__ : Optional[Any] = FlaxRoFormerModelTester(self )
@slow
def lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : str = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=_UpperCAmelCase )
UpperCAmelCase__ : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : List[Any] = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
UpperCAmelCase__ : List[str] = jnp.array([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ : Optional[Any] = model(_UpperCAmelCase )[0]
UpperCAmelCase__ : int = 5_0000
UpperCAmelCase__ : Optional[int] = (1, 6, vocab_size)
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase__ : str = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) ) | 599 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this with taking a full-size model and reducing its layers and
# emb dimensions to the minimum while keeping the full vocab + merges files, which leads to ~3MB in total.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
UpperCamelCase_ = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
UpperCamelCase_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCamelCase_ = dict(zip(vocab, range(len(vocab))))
UpperCamelCase_ = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_ = Path(tmpdirname)
UpperCamelCase_ = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
UpperCamelCase_ = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
UpperCamelCase_ = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
UpperCamelCase_ = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
UpperCamelCase_ = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
UpperCamelCase_ = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
UpperCamelCase_ = tokenizer(["Making tiny model"], return_tensors="pt")
UpperCamelCase_ = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 599 | 1 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
A__: str = '''bert-base-cased'''
A__: Dict = '''google/pegasus-xsum'''
A__: int = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
A__: str = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
A__: Optional[int] = '''patrickvonplaten/t5-tiny-random'''
A__: Union[str, Any] = '''sshleifer/bart-tiny-random'''
A__: List[Any] = '''sshleifer/tiny-mbart'''
A__: str = '''sshleifer/tiny-marian-en-de'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Path ,_UpperCAmelCase : list ) -> Tuple:
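# write the given articles to the target path, one article per line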
_a : Optional[Any] ="""\n""".join(_UpperCAmelCase )
Path(_UpperCAmelCase ).open("""w""" ).writelines(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_UpperCAmelCase ,F"{split}.source" ) ,_UpperCAmelCase )
_dump_articles(os.path.join(_UpperCAmelCase ,F"{split}.target" ) ,_UpperCAmelCase )
return tmp_dir
class A__ ( UpperCAmelCase__ ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Any ) -> Union[str, Any]:
'''simple docstring'''
_a : int =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
_a : int =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_a : Any =max(len(tokenizer.encode(SCREAMING_SNAKE_CASE ) ) for a in ARTICLES )
_a : Optional[int] =max(len(tokenizer.encode(SCREAMING_SNAKE_CASE ) ) for a in SUMMARIES )
_a : Dict =4
_a : Tuple =8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_a , _a : Optional[int] ="""ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
_a : str =SeqaSeqDataset(
SCREAMING_SNAKE_CASE , data_dir=SCREAMING_SNAKE_CASE , type_path="""train""" , max_source_length=SCREAMING_SNAKE_CASE , max_target_length=SCREAMING_SNAKE_CASE , src_lang=SCREAMING_SNAKE_CASE , tgt_lang=SCREAMING_SNAKE_CASE , )
_a : Any =DataLoader(SCREAMING_SNAKE_CASE , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_a : List[Any] =shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[str] ) -> Dict:
'''simple docstring'''
_a : str =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
_a : int =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_a : Optional[Any] =max(len(tokenizer.encode(SCREAMING_SNAKE_CASE ) ) for a in ARTICLES )
_a : List[str] =max(len(tokenizer.encode(SCREAMING_SNAKE_CASE ) ) for a in SUMMARIES )
_a : Optional[Any] =4
_a : int =LegacySeqaSeqDataset(
SCREAMING_SNAKE_CASE , data_dir=SCREAMING_SNAKE_CASE , type_path="""train""" , max_source_length=2_0 , max_target_length=SCREAMING_SNAKE_CASE , )
_a : Union[str, Any] =DataLoader(SCREAMING_SNAKE_CASE , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
_a : int =AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
_a : Any =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_a : Any =tmp_dir.joinpath("""train.source""" ).open().readlines()
_a : Optional[int] =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 1_2_8 , SCREAMING_SNAKE_CASE )
_a : List[str] ={x.name for x in tmp_dir.iterdir()}
_a : Dict ={x.name for x in save_dir.iterdir()}
_a : str =save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(SCREAMING_SNAKE_CASE ) < len(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == 1
assert len(packed_examples[0] ) == sum(len(SCREAMING_SNAKE_CASE ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def __UpperCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_a , _a , _a : Dict =self._get_dataset(max_len=6_4 )
_a : Any =6_4
_a : str =ds.make_dynamic_sampler(SCREAMING_SNAKE_CASE , required_batch_size_multiple=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =[len(SCREAMING_SNAKE_CASE ) for x in batch_sampler]
assert len(set(SCREAMING_SNAKE_CASE ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) # no dropped or added examples
_a : Any =DataLoader(SCREAMING_SNAKE_CASE , batch_sampler=SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 )
_a : Union[str, Any] =[]
_a : str =[]
for batch in data_loader:
_a : Dict =batch["""input_ids"""].shape
_a : Optional[Any] =src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_a : str =np.product(batch["""input_ids"""].shape )
num_src_per_batch.append(SCREAMING_SNAKE_CASE )
if num_src_tokens > (max_tokens * 1.1):
failures.append(SCREAMING_SNAKE_CASE )
assert num_src_per_batch[0] == max(SCREAMING_SNAKE_CASE )
if failures:
raise AssertionError(f"too many tokens in {len(SCREAMING_SNAKE_CASE )} batches" )
def __UpperCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
_a , _a , _a : Tuple =self._get_dataset(max_len=5_1_2 )
_a : Union[str, Any] =2
_a : str =ds.make_sortish_sampler(SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE )
_a : Dict =DataLoader(SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 )
_a : List[str] =DataLoader(SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 , sampler=SCREAMING_SNAKE_CASE )
_a : Any =tokenizer.pad_token_id
def count_pad_tokens(SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Tuple="input_ids" ):
return [batch[k].eq(SCREAMING_SNAKE_CASE ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(SCREAMING_SNAKE_CASE , k="""labels""" ) ) < sum(count_pad_tokens(SCREAMING_SNAKE_CASE , k="""labels""" ) )
assert sum(count_pad_tokens(SCREAMING_SNAKE_CASE ) ) < sum(count_pad_tokens(SCREAMING_SNAKE_CASE ) )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Any=1_0_0_0 , SCREAMING_SNAKE_CASE :Any=1_2_8 ) -> List[Any]:
'''simple docstring'''
if os.getenv("""USE_REAL_DATA""" , SCREAMING_SNAKE_CASE ):
_a : str ="""examples/seq2seq/wmt_en_ro"""
_a : List[Any] =max_len * 2 * 6_4
if not Path(SCREAMING_SNAKE_CASE ).joinpath("""train.len""" ).exists():
save_len_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
_a : str ="""examples/seq2seq/test_data/wmt_en_ro"""
_a : List[Any] =max_len * 4
save_len_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
_a : Tuple =SeqaSeqDataset(
SCREAMING_SNAKE_CASE , data_dir=SCREAMING_SNAKE_CASE , type_path="""train""" , max_source_length=SCREAMING_SNAKE_CASE , max_target_length=SCREAMING_SNAKE_CASE , n_obs=SCREAMING_SNAKE_CASE , )
return ds, max_tokens, tokenizer
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a , _a , _a : Union[str, Any] =self._get_dataset()
_a : Tuple =set(DistributedSortishSampler(SCREAMING_SNAKE_CASE , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=SCREAMING_SNAKE_CASE ) )
_a : List[str] =set(DistributedSortishSampler(SCREAMING_SNAKE_CASE , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=SCREAMING_SNAKE_CASE ) )
assert idsa.intersection(SCREAMING_SNAKE_CASE ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : Any =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
if tok_name == MBART_TINY:
_a : List[str] =SeqaSeqDataset(
SCREAMING_SNAKE_CASE , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
_a : Optional[int] =train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_a : Tuple =SeqaSeqDataset(
SCREAMING_SNAKE_CASE , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
_a : int =train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(SCREAMING_SNAKE_CASE ) == 1 if tok_name == BART_TINY else len(SCREAMING_SNAKE_CASE ) == 0
| 694 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
if n == 1 or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return 0
elif n == 2:
return 1
else:
_a : Dict =[0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
_a : Union[str, Any] =0
_a : Optional[Any] =2
while digits < n:
index += 1
_a : Optional[int] =len(str(fibonacci(_UpperCAmelCase ) ) )
return index
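# e.g. with n=3 this returns 12, since F(12) = 144 is the first 3-digit Fibonacci number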
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 1000 ) -> int:
return fibonacci_digits_index(_UpperCAmelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 694 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCAmelCase ( *_lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Union[Dict, Any]] = None , _lowerCamelCase : Dict=True , _lowerCamelCase : Dict=2 ):
'''simple docstring'''
from .. import __version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = take_from
SCREAMING_SNAKE_CASE__ : str = ()
if not isinstance(args[0] , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Dict = (args,)
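# each deprecation entry is an (attribute, removal_version, message) tuple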
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
SCREAMING_SNAKE_CASE__ : Optional[int] = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
SCREAMING_SNAKE_CASE__ : Optional[Any] = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
SCREAMING_SNAKE_CASE__ : List[Any] = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
SCREAMING_SNAKE_CASE__ : List[str] = warning + " " if standard_warn else ""
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
SCREAMING_SNAKE_CASE__ : List[str] = inspect.getouterframes(inspect.currentframe() )[1]
SCREAMING_SNAKE_CASE__ : Optional[Any] = call_frame.filename
SCREAMING_SNAKE_CASE__ : Tuple = call_frame.lineno
SCREAMING_SNAKE_CASE__ : Optional[Any] = call_frame.function
SCREAMING_SNAKE_CASE__ : List[str] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values | 712 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
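# build the prompt list (EOS + stripped prompt), tokenize once as a padded batch, then yield each prompt n_copies times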
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
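# decode only the newly generated tokens and stop once every sequence contains an EOF string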
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str=20 , **_lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a__ : List[Any] = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
a__ : Optional[Any] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
a__ : Optional[Any] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
a__ : Any = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
a__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 622 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[Any] = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
lowerCAmelCase : str = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert("RGB" )
return image
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Dict = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : int = dct.pop(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = val
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowerCAmelCase : Optional[int] = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
lowerCAmelCase : Union[str, Any] = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
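# k has no bias in the original checkpoint, so pad the fused qkv bias with zeros in the middle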
lowerCAmelCase : Optional[int] = torch.cat((q_bias, torch.zeros_like(SCREAMING_SNAKE_CASE , requires_grad=SCREAMING_SNAKE_CASE ), v_bias) )
lowerCAmelCase : int = qkv_bias
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : Dict = 3_6_4 if "coco" in model_name else 2_2_4
lowerCAmelCase : List[str] = BlipaVisionConfig(image_size=SCREAMING_SNAKE_CASE ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
lowerCAmelCase : int = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=SCREAMING_SNAKE_CASE ).to_dict()
elif "opt-6.7b" in model_name:
lowerCAmelCase : List[str] = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=SCREAMING_SNAKE_CASE ).to_dict()
elif "t5-xl" in model_name:
lowerCAmelCase : str = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowerCAmelCase : str = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
lowerCAmelCase : Union[str, Any] = BlipaConfig(vision_config=SCREAMING_SNAKE_CASE , text_config=SCREAMING_SNAKE_CASE )
return config, image_size
@torch.no_grad()
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Any=False ):
'''simple docstring'''
lowerCAmelCase : Any = (
AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
if "opt" in model_name
else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
)
lowerCAmelCase : Optional[Any] = tokenizer("\n" , add_special_tokens=SCREAMING_SNAKE_CASE ).input_ids[0]
lowerCAmelCase , lowerCAmelCase : Any = get_blipa_config(SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(SCREAMING_SNAKE_CASE ).eval()
lowerCAmelCase : Union[str, Any] = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
lowerCAmelCase , lowerCAmelCase : List[Any] = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
lowerCAmelCase : Any = "cuda" if torch.cuda.is_available() else "cpu"
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = load_model_and_preprocess(
name=SCREAMING_SNAKE_CASE , model_type=SCREAMING_SNAKE_CASE , is_eval=SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE )
original_model.eval()
print("Done!" )
# update state dict keys
lowerCAmelCase : str = original_model.state_dict()
lowerCAmelCase : Tuple = create_rename_keys(SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCAmelCase : Any = state_dict.pop(SCREAMING_SNAKE_CASE )
if key.startswith("Qformer.bert" ):
lowerCAmelCase : Union[str, Any] = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
lowerCAmelCase : Dict = key.replace("self" , "attention" )
if "opt_proj" in key:
lowerCAmelCase : Dict = key.replace("opt_proj" , "language_projection" )
if "t5_proj" in key:
lowerCAmelCase : Optional[int] = key.replace("t5_proj" , "language_projection" )
if key.startswith("opt" ):
lowerCAmelCase : Any = key.replace("opt" , "language" )
if key.startswith("t5" ):
lowerCAmelCase : Tuple = key.replace("t5" , "language" )
lowerCAmelCase : Optional[int] = val
# read in qv biases
read_in_q_v_bias(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Tuple = hf_model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowerCAmelCase : Union[str, Any] = load_demo_image()
lowerCAmelCase : Dict = vis_processors["eval"](SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(SCREAMING_SNAKE_CASE )
# create processor
lowerCAmelCase : int = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = BlipaProcessor(image_processor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values.to(SCREAMING_SNAKE_CASE )
# make sure the processor creates exactly the same pixel values
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
original_model.to(SCREAMING_SNAKE_CASE )
hf_model.to(SCREAMING_SNAKE_CASE )
with torch.no_grad():
if "opt" in model_name:
lowerCAmelCase : Union[str, Any] = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
lowerCAmelCase : str = hf_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).logits
else:
lowerCAmelCase : List[str] = original_model(
{"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
lowerCAmelCase : List[str] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
lowerCAmelCase : Tuple = hf_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ).logits
assert original_logits.shape == logits.shape
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowerCAmelCase : List[str] = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=SCREAMING_SNAKE_CASE )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowerCAmelCase : List[str] = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=SCREAMING_SNAKE_CASE )
else:
# cast to same type
lowerCAmelCase : int = logits.dtype
assert torch.allclose(original_logits.to(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , atol=1E-2 )
print("Looks ok!" )
print("Generating a caption..." )
lowerCAmelCase : Optional[int] = ""
lowerCAmelCase : List[str] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" ).input_ids.to(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = original_model.generate({"image": original_pixel_values} )
lowerCAmelCase : Any = hf_model.generate(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("Original generation:" , SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = input_ids.shape[1]
lowerCAmelCase : Union[str, Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = [text.strip() for text in output_text]
print("HF generation:" , SCREAMING_SNAKE_CASE )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
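
# Example invocation (hypothetical paths; the script name assumes the usual
# transformers conversion-script convention):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub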
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14,
        label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4,
        is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25, autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length, context_length=self.context_length,
            label_length=self.label_length, lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features, num_static_categorical_features=1,
            cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
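
# For intuition, the `decomposition_layer` exercised above splits a series into
# seasonal and trend components with an edge-padded moving average. The helper
# below is a minimal standalone sketch for illustration only (it is not the
# transformers code path); `kernel_size` plays the role of the `moving_average`
# config value used by the tester.
def _decompose_for_illustration(x, kernel_size=25):
    # replicate edge values so the moving average preserves the sequence length
    pad = (kernel_size - 1) // 2
    front = x[:, :1, :].repeat(1, pad, 1)
    end = x[:, -1:, :].repeat(1, pad, 1)
    padded = torch.cat([front, x, end], dim=1)
    # moving average over time gives the trend; the residual is the seasonal part
    trend = torch.nn.functional.avg_pool1d(padded.permute(0, 2, 1), kernel_size, stride=1).permute(0, 2, 1)
    seasonal = x - trend
    return seasonal, trend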
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
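
# The downloaded batch is a plain dict of tensors; the keys consumed below
# ("past_values", "past_time_features", "past_observed_mask",
# "static_categorical_features", "future_values", "future_time_features") mirror
# the model's forward signature, and map_location keeps the CPU-serialized
# tensors loadable on whatever device the tests run on.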
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )

        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
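
    # Each property above seeds the RNG before instantiating, so the randomly
    # initialized dummy weights are identical on every access and across runs.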
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
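
# A minimal end-to-end usage sketch for the pipeline exercised above (kept as a
# comment so the test module stays import-cheap; the model id is the one used by
# the integration test below, attribute names follow the assertions above):
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
#   output = pipe()
#   output.images[0].save("spectrogram.png")  # mel spectrogram as a PIL image
#   audio = output.audios[0]                  # reconstructed waveform, shape (1, samples)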
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0