"""Greedy fractional knapsack: maximize value by taking item fractions."""
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    # Sort item indices by value-to-weight ratio, best first (greedy order).
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # The whole item fits: take all of it.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the fraction that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
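# Illustrative run (a sketch; the values/weights below are arbitrary test data,
# not from the original module). Ratios are 6, 5 and 4, so the first two items
# are taken whole and two thirds of the third item fills the remaining 20 units:
#
#     fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#     -> (240.0, [1, 1, 0.6666666666666666])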
"""Minimum spanning tree of a weighted undirected graph via Borůvka's algorithm."""


class Graph:
    def __init__(self) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex) -> None:
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight) -> None:
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self) -> None:
        # Make all edge weights distinct so the MST is unique.
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))  # drop the reversed duplicate
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self) -> list:
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        # Disjoint-set structure with path compression and union by rank.
        def __init__(self) -> None:
            self.parent = {}
            self.rank = {}

        def __len__(self) -> int:
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            # For every component, remember the cheapest edge leaving it.
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))  # drop the reversed duplicate
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            # Add each component's cheapest edge to the MST and merge components.
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
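# Minimal usage sketch (the edge list below is hypothetical, not from the module):
#
#     g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)])
#     g.distinct_weight()            # ensure all weights differ so the MST is unique
#     mst = Graph.boruvka_mst(g)
#     print(mst)                     # each MST edge printed as "head -> tail == weight"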
"""Text justification: greedily pack words, then pad each line to a fixed width."""


def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line,
            # just append overall_spaces_count spaces after it
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you how many spaces
            # to insert after the word at line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute the leftover spaces round-robin to the leftmost words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can no longer fill out max_width
            # width = sum of lengths of all words (without spaces)
            # len(word) = length of the current word
            # len(line) = minimum number of spaces needed between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to the result
            answer.append(justify(line, width, max_width))
            # reset the new line and the new width
            line, width = [word], len(word)
    # last line: left-justify and pad the remainder with spaces
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
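# Example (the classic LeetCode 68 test case; the output was traced by hand
# against the code above):
#
#     text_justification("This is an example of text justification.", 16)
#     -> ['This    is    an', 'example  of text', 'justification.  ']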
"""Tests for the caching helpers in datasets' file_utils."""
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
"""Masked image modeling (SimMIM-style) pre-training example for 🤗 Transformers."""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


class MaskGenerator:
    """Generates a random boolean mask over image patches for the pretraining task."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        # Pick mask_count random blocks out of the coarse rand_size x rand_size grid ...
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        # ... then upsample the block mask to one flag per model patch.
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
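# Illustrative use of MaskGenerator with its defaults (a sketch, not part of the
# original script): a 192px image with 32px mask blocks and a 4px model patch
# gives a 6x6 grid of maskable blocks, upsampled to one flag per model patch.
#
#     mask_gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
#     mask = mask_gen()
#     assert mask.numel() == (192 // 4) ** 2          # 2304 flags, one per 4px patch
#     assert int(mask.sum()) == 22 * (32 // 4) ** 2   # ceil(36 * 0.6) = 22 blocks masked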
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
"""Convert original SAM (Segment Anything) checkpoints to the Transformers format."""
import argparse
import re

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SamConfig,
    SamImageProcessor,
    SamModel,
    SamProcessor,
    SamVisionConfig,
)


KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}


def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repo id to download the original checkpoint from",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
"""Lazy import structure for the Table Transformer model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
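# Design note (a sketch of the behavior, not part of the original file): with the
# module object replaced by _LazyModule, an import such as
#     from transformers.models.table_transformer import TableTransformerModel
# only triggers the import of modeling_table_transformer (and hence torch) at
# attribute-access time, keeping `import transformers` itself cheap.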
"""K-DPM2 discrete scheduler for 🤗 Diffusers, inspired by the DPM-Solver-2 sampler from k-diffusion."""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
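# Quick sanity sketch (assumes the defaults above, not part of the original file):
# a 1000-step cosine schedule produces positive betas capped at max_beta.
#
#     betas = betas_for_alpha_bar(1000)
#     assert betas.shape == (1000,)
#     assert float(betas.max()) <= 0.999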
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
"""Accelerate + DeepSpeed training example that checks end-to-end performance on GLUE MRPC."""
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
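# Note on the collate strategy above (a design-choice remark, not original code):
# padding="longest" pads each batch only to its own longest sequence, so batch
# widths vary; the TPU branch instead pads everything to a fixed max_length=128
# so that XLA sees static shapes and does not recompile on every batch.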
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
"""Graph m-coloring via backtracking."""


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour already use this color?
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
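# Worked example (a 5-vertex adjacency matrix; the result below was traced by
# hand against the backtracking above):
#
#     graph = [
#         [0, 1, 0, 0, 0],
#         [1, 0, 1, 0, 1],
#         [0, 1, 0, 1, 0],
#         [0, 0, 1, 0, 1],
#         [0, 1, 0, 1, 0],
#     ]
#     color(graph, 3)  # -> [0, 1, 0, 1, 0]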
"""Tokenization classes for GPT-NeoX."""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if its add_prefix_space setting disagrees with ours.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
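# Typical usage (a sketch; downloads the tokenizer.json listed above from the Hub):
#
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tokenizer("Hello world")["input_ids"]
#     tokenizer.decode(ids)  # -> "Hello world"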
"""Prim's algorithm for minimum spanning trees, with list-based and heap-based variants."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
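# Small worked example (a hypothetical 3-vertex graph; `connect` takes 1-based ids):
#
#     graph = [Vertex(i) for i in range(3)]
#     connect(graph, 1, 2, 5)
#     connect(graph, 2, 3, 3)
#     connect(graph, 1, 3, 8)
#     prim(graph, graph[0])  # -> [(2, 1), (3, 2)]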
"""DETA model configuration."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
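# Minimal sketch (assumes a transformers install with the ResNet config available):
# the default constructor falls back to a ResNet backbone config, and the
# attribute_map aliases resolve to the DETR-style names.
#
#     config = DetaConfig()
#     assert config.hidden_size == config.d_model == 256
#     assert config.num_attention_heads == config.encoder_attention_heads == 8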
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator :
    def __init__( self , multiplier , increment , modulo , seed=int(time() ) ) -> None: # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number( self ) -> int:
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
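# Hedged worked example (added for illustration, not in the original file): one
# step of the recurrence seed' = (multiplier * seed + increment) % modulo, with
# small made-up constants.
assert LinearCongruentialGenerator(5, 3, 16, seed=7).next_number() == (5 * 7 + 3) % 16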
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 691 | 1 |
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock ( *args ):
    # solves the multi-process interleaved print problem by locking this file
    with open(__file__, "r" ) as fh:
        fcntl.flock(fh, fcntl.LOCK_EX )
        try:
            print(*args )
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN )
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 691 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively ( hf_pointer, key, value, full_name, weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore ( name, ignore_keys ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
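# Hedged illustration (added here, not part of the original script): how the
# ".*." wildcard in the mapping keys above is matched by prefix/suffix
# containment. The helper name is hypothetical.
def _demo_wildcard_match(name, key):
    prefix, suffix = key.split(".*." )
    return prefix in name and suffix in name

assert _demo_wildcard_match(
    "quantizer.vq.layers.3._codebook.embed", "quantizer.vq.layers.*._codebook.embed"
)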
def recursively_load_weights ( orig_dict, model, model_name ):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'''Unsupported model: {model_name}''' )
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS ):
            logger.info(F'''{name} was ignored''' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*." )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed" ) and name.endswith("embed_avg" ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split("." )[-2]
                    mapped_key = mapped_key.replace("*", layer_index )
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint ( model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'''Unknown model name: {model_name}''' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
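# Hedged CLI sketch (added for illustration; the script file name and paths are
# assumptions, not from the original):
#
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf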
| 691 | 1 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum ( x ): # picklable for multiprocessing
    return x.sum()


def add_one ( i ): # picklable for multiprocessing
    return i + 1
@dataclass
class A :
    x : int
    y : str
class A ( a ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
class A :
__UpperCAmelCase : Optional[int] = """bar"""
_a = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc ( iterable_length, num_proc, expected_num_proc ):
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {F'''{i}''': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A ( a ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
_a = layers.Dense(2 )
def gen_random_output():
_a = tf.random.uniform((1, 3) )
return model(snake_case_ ).numpy()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
_a = torch.nn.Linear(3 , 2 )
_a = torch.rand(1 , 3 )
return model(snake_case_ ).detach().numpy()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a = gen_random_output()
with temp_seed(4_2 ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def test_nested_data_structure_data ( input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def test_flatten ( data, expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict ( ):
    input = A(x=1, y="foobar" )
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input ) == expected_output
    input = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10, y="foo" )] )
def _split_text ( text : str ):
    return text.split()
def _aseconds_generator_of_aitems_with_timing ( content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered ( ):
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
| 691 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
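# Hedged sketch (an illustration added here, not part of the original module) of
# the lazy-import idea used above: attributes resolve to their submodule only on
# first access. The demo class and its names are hypothetical.
import importlib
import types

class _LazyDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._structure = import_structure

    def __getattr__(self, attr):
        for module_name, names in self._structure.items():
            if attr in names:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(attr)

assert _LazyDemo("demo", {"math": ["sqrt"]}).sqrt(9) == 3.0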
| 691 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """wav2vec2"""
def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
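# Hedged check (added for illustration, not in the original file): with the
# default conv strides (5, 2, 2, 2, 2, 2, 2) above, the property returns the
# overall downsampling factor of the feature encoder.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320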
| 691 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=a ):
__UpperCAmelCase : int = ["""torch""", """scipy"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
| 691 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class A ( a ):
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : Union[str, Any] = TaTokenizer
__UpperCAmelCase : List[int] = []
def __init__( self , snake_case_=None , snake_case_=None , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=1_0_0 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_a = [F'''<extra_id_{i}>''' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
_a = len(set(filter(lambda snake_case_ : bool("extra_id_" in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , **snake_case_ , )
_a = vocab_file
_a = False if not self.vocab_file else True
_a = extra_ids
@staticmethod
def __lowerCAmelCase ( snake_case_ , snake_case_ , snake_case_ ) -> int:
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
_a = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , snake_case_ , )
return max_model_length
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_a = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
_a = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowerCAmelCase ( self ) -> Tuple:
return list(
set(filter(lambda snake_case_ : bool(re.search(R"<extra_id_\d+>" , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return [self.convert_tokens_to_ids(snake_case_ ) for token in self.get_sentinel_tokens()]
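# Hedged illustration (added, not part of the original file) of the special-token
# scheme implemented above: EOS is appended to each segment, and token_type_ids
# are all zeros. The EOS id of 1 and the helper name are assumptions for
# illustration only.
def _demo_build_inputs(token_ids_a, token_ids_b=None, eos_id=1):
    if token_ids_b is None:
        return token_ids_a + [eos_id]
    return token_ids_a + [eos_id] + token_ids_b + [eos_id]

assert _demo_build_inputs([10, 11]) == [10, 11, 1]
assert _demo_build_inputs([10], [20]) == [10, 1, 20, 1]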
| 691 |
'''simple docstring'''
__snake_case : Dict = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 691 | 1 |
'''simple docstring'''
from typing import Any
class Node :
    def __init__( self , data : Any ) -> None:
        self.data = data
        self.next = None


class LinkedList :
    def __init__( self ) -> None:
        self.head = None

    def print_list( self ) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data , end=" " )
            temp = temp.next
        print()

    def push( self , new_data : Any ) -> None:
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node

    def swap_nodes( self , node_data_1 , node_data_2 ) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 691 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ProphetNetTokenizer
__UpperCAmelCase : Optional[Any] = False
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> int:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 691 | 1 |
'''simple docstring'''
import math
def _lowercase ( initial_intensity : float, angle : float ):
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(lowerCamelCase__ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
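    # Hedged worked example (added, not in the original file): at 60 degrees the
    # transmitted intensity is cos^2(60°) = 1/4 of the initial intensity.
    assert abs(_lowercase(100.0, 60.0) - 25.0) < 1e-9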
| 691 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args ( ):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine" )
    parser.add_argument("--num_epochs", type=int, default=5 )
    parser.add_argument("--batch_size", type=int, default=6 )
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1 )
    parser.add_argument("--freeze", type=bool, default=True )
    parser.add_argument("--learning_rate", type=float, default=5e-4 )
    parser.add_argument("--seed", type=int, default=0 )
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine" )
    parser.add_argument("--num_warmup_steps", type=int, default=10 )
    parser.add_argument("--weight_decay", type=float, default=0.01 )
    parser.add_argument("--output_dir", type=str, default="./results" )
return parser.parse_args()
metric = load("accuracy")
def compute_metrics ( eval_pred ):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1 )
    return metric.compute(predictions=predictions, references=labels )
class A ( a ):
    def __init__( self , trainer ) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end( self , args , state , control , **kwargs ) -> Optional[int]:
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
            return control_copy
def main ( ):
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset("codeparrot/codecomplex", split="train" )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test["test"].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        } )
    print("Loading tokenizer and model" )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
    def tokenize(example ):
        inputs = tokenizer(example["src"], truncation=True, max_length=1_024 )
        label = labels.str2int(example["complexity"] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
    print("Training..." )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
main()
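# Hedged CLI sketch (added for illustration; the script file name is an
# assumption, not from the original):
#
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5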
| 691 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 691 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card ( model_card_dir, src_lang, tgt_lang, model_name ):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
    pair = F'''{src_lang}-{tgt_lang}'''
    readme = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True )
    path = os.path.join(model_card_dir, "README.md" )
    print(F'''Generating {path}''' )
    with open(path, "w", encoding="utf-8" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691 | 1 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
__snake_case : int = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
__snake_case : str = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def pt_to_pil ( images ):
    images = (images / 2 + 0.5).clamp(0, 1 )
    images = images.cpu().permute(0, 2, 3, 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images


def numpy_to_pil ( images ):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
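# Hedged usage sketch (added, not in the original file): convert one random
# float image in [0, 1] to a PIL image.
if __name__ == "__main__":
    import numpy as np

    demo = numpy_to_pil(np.random.rand(8, 8, 3 ) )
    assert len(demo ) == 1 and demo[0].size == (8, 8)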
| 691 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
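# Quick sanity check of the BART-style special-token layout built above (a
# sketch; assumes network access to fetch the RUCAIBox/mvp tokenizer files
# listed in the maps at the top of this module):
if __name__ == "__main__":
    tok = A.from_pretrained("RUCAIBox/mvp" )
    ids = tok("Hello world" )["input_ids"]
    assert ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id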
| 691 | 1 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
__snake_case : Dict = "us-east-1" # defaults region
@dataclass
class SageMakerTestEnvironment :
    framework : str
    role = """arn:aws:iam::558105141721:role/sagemaker_execution_role"""
    hyperparameters = {
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 16,
"""per_device_eval_batch_size""": 16,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 500,
"""save_steps""": 5500,
}
__UpperCAmelCase : List[str] = {**hyperparameters, """max_steps""": 1000}
@property
def __lowerCAmelCase ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowerCAmelCase ( self ) -> str:
        return F'''{self.framework}-transformers-test'''
@property
def __lowerCAmelCase ( self ) -> str:
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
def __lowerCAmelCase ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def _lowercase ( request ):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
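if __name__ == "__main__":
    # Quick check of the pytorch metric regexes defined above (a sketch): the
    # pattern pulls the number that follows `train_runtime =` from a log line.
    import re
    match = re.search(r"train_runtime.*=\D*(.*?)$" , "train_runtime = 12.3" )
    assert match is not None and match.group(1 ) == "12.3"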
| 691 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MaMaaaTokenizer
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
_a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_a = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertEqual(snake_case_ , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
_a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_a = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer.get_vocab()
self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
# fmt: off
_a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
_a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(snake_case_ )
_a = MaMaaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
| 691 | 1 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
__snake_case : List[Any] = "sshleifer/student_marian_en_ro_6_1"
__snake_case : Any = "sshleifer/tiny-mbart"
@require_torch
class A ( a ):
def __lowerCAmelCase ( self , snake_case_=False , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , ) -> Any:
_a = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=snake_case_ , num_train_epochs=1 , distributed=snake_case_ , extra_args_str=snake_case_ , predict_with_generate=snake_case_ , do_train=snake_case_ , do_eval=snake_case_ , do_predict=snake_case_ , )
_a = TrainerState.load_from_json(os.path.join(snake_case_ , "trainer_state.json" ) ).log_history
if not do_eval:
return
_a = [log for log in logs if "eval_loss" in log.keys()]
_a = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_a = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , snake_case_ )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __lowerCAmelCase ( self ) -> Optional[int]:
self.run_seqaseq_quick(distributed=snake_case_ )
@require_torch_multi_gpu
def __lowerCAmelCase ( self ) -> Tuple:
self.run_seqaseq_quick(distributed=snake_case_ )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase ( self ) -> Any:
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase ( self ) -> Any:
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=snake_case_ )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase ( self ) -> Any:
self.run_seqaseq_quick(
distributed=snake_case_ , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=snake_case_ )
@require_apex
@require_torch_gpu
def __lowerCAmelCase ( self ) -> Optional[Any]:
        # XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() called from the same
        # program, and it breaks other tests that run from the same pytest worker; therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu we will need to make a special test
        #
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
        # 2nd main() call it botches the future eval.
        #
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="--fp16 --fp16_backend=apex" )
        # test a 2nd time - was getting `eval_loss: nan`
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
_a = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
_a = experiments[experiment_id]
_a = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
_a = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**snake_case_ , extra_args_str=data["extra_args_str"] )
_a = len(re.findall(snake_case_ , cl.err ) )
self.assertEqual(snake_case_ , data["n_matches"] )
@slow
def __lowerCAmelCase ( self ) -> Dict:
_a = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=snake_case_ , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=snake_case_ , )
# Check metrics
_a = TrainerState.load_from_json(os.path.join(snake_case_ , "trainer_state.json" ) ).log_history
_a = [log for log in logs if "eval_loss" in log.keys()]
_a = eval_metrics[0]
_a = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , snake_case_ )
# test if do_predict saves generations and metrics
_a = os.listdir(snake_case_ )
_a = {os.path.basename(snake_case_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __lowerCAmelCase ( self ) -> Tuple:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(snake_case_ ) -> Tuple[int, float]:
_a = "--skip_memory_metrics 0"
_a = self.run_trainer(
max_len=1_2_8 , model_name=snake_case_ , learning_rate=3E-4 , num_train_epochs=1 , optim=snake_case_ , distributed=snake_case_ , extra_args_str=snake_case_ , do_eval=snake_case_ , do_predict=snake_case_ , n_gpus_to_use=1 , )
# Check metrics
_a = TrainerState.load_from_json(Path(snake_case_ , "trainer_state.json" ) ).log_history
_a = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**2_0 )
_a = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**2_0 )
_a = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_a , _a , _a = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_a , _a , _a = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_a = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_a = gpu_peak_mem_orig + gpu_alloc_mem_orig
_a = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_a = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
_a = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
snake_case_ , snake_case_ , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
snake_case_ , snake_case_ , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
snake_case_ , snake_case_ , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 3E-3 , snake_case_ = "adafactor" , snake_case_ = False , snake_case_ = None , snake_case_ = 0 , snake_case_ = True , snake_case_ = True , snake_case_ = True , snake_case_ = True , snake_case_ = None , ) -> Dict:
_a = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
_a = self.get_auto_remove_tmp_dir()
_a = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(snake_case_ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(snake_case_ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
_a = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(snake_case_ )}
'''.split()
_a = "\n --do_predict\n ".split()
_a = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_a = get_gpu_count()
_a = get_torch_dist_unique_port()
_a = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
_a = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(snake_case_ , env=self.get_env() )
else:
_a = ["run_translation.py"] + args
with patch.object(snake_case_ , "argv" , snake_case_ ):
main()
return output_dir
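# Back-of-the-envelope check of the bnb optimizer-memory comment in the test
# above (a sketch; the 54M-total / 29M-embedding parameter split is taken from
# that comment, everything else is plain arithmetic):
if __name__ == "__main__":
    quantized_params = 54_000_000 - 29_000_000    # embeddings stay in fp32
    adamw_fp32_mb = quantized_params * 8 / 2**20  # 8 bytes/param of optim state
    adamw_8bit_mb = quantized_params * 2 / 2**20  # 2 bytes/param of optim state
    assert adamw_fp32_mb - adamw_8bit_mb > 120    # the margin asserted above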
| 691 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """wav2vec2"""
def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
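# Quick check of the stride product above for the default conv strides (a sketch):
# one output frame per 5 * 2**6 == 320 raw samples, i.e. 20 ms of 16 kHz audio.
assert functools.reduce(operator.mul , (5, 2, 2, 2, 2, 2, 2) , 1 ) == 320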
| 691 | 1 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowercase ( lowerCamelCase__ : int ):
_a = prime_factors(lowerCamelCase__ )
    if is_square_free(_a ):
        return -1 if len(_a ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
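    # Spot-checks against known Möbius values (a sketch; assumes the maths
    # helpers imported above take a prime factorization list, as in the repo):
    assert _lowercase(1 ) == 1    # empty factorization: even count
    assert _lowercase(6 ) == 1    # 2 * 3: square-free, even count
    assert _lowercase(30 ) == -1  # 2 * 3 * 5: square-free, odd count
    assert _lowercase(12 ) == 0   # divisible by 2**2: not square-free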
| 691 |
'''simple docstring'''
def set_bit ( number : int, position : int ):
    return number | (1 << position)
def clear_bit ( number : int, position : int ):
    return number & ~(1 << position)
def flip_bit ( number : int, position : int ):
    return number ^ (1 << position)
def is_bit_set ( number : int, position : int ):
    return ((number >> position) & 1) == 1
def get_bit ( number : int, position : int ):
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
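    # Worked examples for the helpers above (positions count from the least
    # significant bit, so 0b1010 has bits set at positions 1 and 3):
    assert set_bit(0b1010, 0 ) == 0b1011
    assert clear_bit(0b1010, 1 ) == 0b1000
    assert flip_bit(0b1010, 2 ) == 0b1110
    assert is_bit_set(0b1010, 3 ) is True
    assert get_bit(0b1010, 1 ) == 1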
| 691 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
__UpperCAmelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCAmelCase : Optional[str] = field(
default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCAmelCase : Optional[str] = field(
default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCAmelCase : Optional[str] = field(
default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCAmelCase : bool = field(
default=a , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCAmelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCAmelCase : bool = field(
default=a , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class DataTrainingArguments :
__UpperCAmelCase : Optional[str] = field(default=a , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCAmelCase : Optional[str] = field(
default=a , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCAmelCase : bool = field(
default=a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCAmelCase : Optional[int] = field(
default=a , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCAmelCase : Optional[int] = field(
default=a , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCAmelCase : bool = field(
default=a , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCAmelCase : Optional[int] = field(
default=a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCAmelCase : Optional[int] = field(
default=a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __lowerCAmelCase ( self ) -> Optional[Any]:
        if self.train_file is not None:
            extension = self.train_file.split("." )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split("." )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice :
__UpperCAmelCase : PreTrainedTokenizerBase
__UpperCAmelCase : Union[bool, str, PaddingStrategy] = True
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[int] = None
def __call__( self , snake_case_ ) -> Dict:
_a = "label" if "label" in features[0].keys() else "labels"
_a = [feature.pop(snake_case_ ) for feature in features]
_a = len(snake_case_ )
_a = len(features[0]["input_ids"] )
_a = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
_a = list(chain(*snake_case_ ) )
_a = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
_a = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
_a = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
def _lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag", lowerCamelCase__, lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_a = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_a = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_a = {}
if data_args.train_file is not None:
_a = data_args.train_file
if data_args.validation_file is not None:
_a = data_args.validation_file
_a = data_args.train_file.split("." )[-1]
_a = load_dataset(
lowerCamelCase__, data_files=lowerCamelCase__, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
_a = load_dataset(
"swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_a = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=lowerCamelCase__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_a = [F'''ending{i}''' for i in range(4 )]
_a = "sent1"
_a = "sent2"
if data_args.max_seq_length is None:
_a = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
_a = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_a = min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase__ : Tuple ):
_a = [[context] * 4 for context in examples[context_name]]
_a = examples[question_header_name]
_a = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase__ )
]
# Flatten out
_a = list(chain(*lowerCamelCase__ ) )
_a = list(chain(*lowerCamelCase__ ) )
# Tokenize
_a = tokenizer(
lowerCamelCase__, lowerCamelCase__, truncation=lowerCamelCase__, max_length=lowerCamelCase__, padding="max_length" if data_args.pad_to_max_length else False, )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(lowerCamelCase__ ), 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_a = raw_datasets["train"]
if data_args.max_train_samples is not None:
_a = min(len(lowerCamelCase__ ), data_args.max_train_samples )
_a = train_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_a = train_dataset.map(
lowerCamelCase__, batched=lowerCamelCase__, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_a = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_a = min(len(lowerCamelCase__ ), data_args.max_eval_samples )
_a = eval_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_a = eval_dataset.map(
lowerCamelCase__, batched=lowerCamelCase__, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
_a = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase__, pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase__ : int ):
_a , _a = eval_predictions
_a = np.argmax(lowerCamelCase__, axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
# Training
if training_args.do_train:
_a = None
if training_args.resume_from_checkpoint is not None:
_a = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_a = last_checkpoint
_a = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_a = train_result.metrics
_a = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ )
)
_a = min(lowerCamelCase__, len(lowerCamelCase__ ) )
trainer.log_metrics("train", lowerCamelCase__ )
trainer.save_metrics("train", lowerCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_a = trainer.evaluate()
_a = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ )
_a = min(lowerCamelCase__, len(lowerCamelCase__ ) )
trainer.log_metrics("eval", lowerCamelCase__ )
trainer.save_metrics("eval", lowerCamelCase__ )
_a = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase__ )
else:
trainer.create_model_card(**lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
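# Worked illustration of the flatten / un-flatten pattern used by
# `preprocess_function` and the collator above (a sketch with toy data): eight
# flat tokenized rows regroup into two examples of four choices each, which is
# why the model sees input_ids of shape [batch, 4, seq_len].
def _demo_unflatten():
    flat = list(range(8 ) )
    return [flat[i : i + 4] for i in range(0, len(flat ), 4 )]  # [[0,1,2,3], [4,5,6,7]]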
| 691 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _lowercase ( input_text, convert_value=None, default=None, error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _lowercase ( input_text, options=[], convert_value=None, default_choice=0 ):
    menu = BulletMenu(input_text, options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _lowercase ( value ):
    value = int(value )
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _lowercase ( value ):
    value = int(value )
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _lowercase ( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _lowercase ( value ):
    value = int(value )
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _lowercase ( value ):
    value = int(value )
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _lowercase ( value ):
    return {"yes": True, "no": False}[value.lower()]
class A ( argparse.RawDescriptionHelpFormatter ):
    def _format_usage ( self , usage , actions , groups , prefix ) -> str:
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace("<command> [<args>] " , "" )
        return usage
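# Illustrative use of the converters above (a sketch): each converter maps a
# numeric menu choice positionally onto an enum member; the yes/no helper is
# the last `_lowercase` definition left bound at module level.
if __name__ == "__main__":
    assert _lowercase("YES" ) is True and _lowercase("no" ) is False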
| 691 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list ( shape, scale=1.0, rng=None, name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class ASTFeatureExtractionTester ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=4_0_0 , snake_case_=2_0_0_0 , snake_case_=1 , snake_case_=0.0 , snake_case_=1_6_0_0_0 , snake_case_=True , snake_case_=True , ) -> str:
_a = parent
_a = batch_size
_a = min_seq_length
_a = max_seq_length
_a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a = feature_size
_a = padding_value
_a = sampling_rate
_a = return_attention_mask
_a = do_normalize
def __lowerCAmelCase ( self ) -> Optional[int]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __lowerCAmelCase ( self , snake_case_=False , snake_case_=False ) -> int:
def _flatten(snake_case_ ):
return list(itertools.chain(*snake_case_ ) )
if equal_length:
_a = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_a = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a = [np.asarray(snake_case_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A ( a , unittest.TestCase ):
__UpperCAmelCase : Tuple = ASTFeatureExtractor
def __lowerCAmelCase ( self ) -> str:
_a = ASTFeatureExtractionTester(self )
def __lowerCAmelCase ( self ) -> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
_a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test not batched input
_a = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
_a = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# Test batched
_a = feat_extract(snake_case_ , padding=snake_case_ , return_tensors="np" ).input_values
_a = feat_extract(snake_case_ , padding=snake_case_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_a = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a = np.asarray(snake_case_ )
_a = feat_extract(snake_case_ , return_tensors="np" ).input_values
_a = feat_extract(snake_case_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
@require_torch
def __lowerCAmelCase ( self ) -> Dict:
import torch
_a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a = np.random.rand(1_0_0 ).astype(np.floataa )
_a = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
from datasets import load_dataset
_a = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_a = ds.sort("id" ).select(range(snake_case_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def __lowerCAmelCase ( self ) -> Optional[int]:
# fmt: off
_a = torch.tensor(
[-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776,
-1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133,
-1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936,
-0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] )
# fmt: on
_a = self._load_datasamples(1 )
_a = ASTFeatureExtractor()
_a = feature_extractor(snake_case_ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , snake_case_ , atol=1E-4 ) )
| 691 |
'''simple docstring'''
def simplify ( current_set : list[list] ):
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0, current_first_column[i] )
        resultant.insert(0, first_row )
        final_set = resultant
    return final_set
def _lowercase ( lowerCamelCase__ : list[list] ):
if len(lowerCamelCase__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
_a = len(lowerCamelCase__ ) + 1
if any(len(lowerCamelCase__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(lowerCamelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
_a = equations.copy()
if any(0 in row for row in data_set ):
_a = data_set.copy()
_a = []
for row_index, row in enumerate(lowerCamelCase__ ):
if 0 not in row:
_a = data_set.pop(lowerCamelCase__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0, lowerCamelCase__ )
_a = data_set.copy()
_a = simplify(lowerCamelCase__ )
_a = simplified[::-1]
_a = []
for row in simplified:
_a = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a = row.copy()[: len(lowerCamelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase__ ) == 0:
solutions.append(0 )
continue
_a = temp_row[1::]
_a = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase__ )
_a = []
for item in solutions:
final.append(float(round(lowerCamelCase__, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Tuple = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
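    # Reference sketch: the elimination result can be cross-checked against
    # numpy's dense solver (assumes numpy is available; rows follow the
    # [coefficients..., constant] layout used throughout this module).
    import numpy as np

    demo = [[2.0, 1.0, 5.0], [1.0, 3.0, 10.0]]  # 2x + y = 5 and x + 3y = 10
    print(np.linalg.solve([row[:-1] for row in demo], [row[-1] for row in demo]))  # [1. 3.]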
| 691 | 1 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__snake_case : List[Any] = logging.getLogger(__name__)
class A ( a ):
__UpperCAmelCase : int = """summarization"""
__UpperCAmelCase : Union[str, Any] = ["""loss"""]
__UpperCAmelCase : Any = ROUGE_KEYS
__UpperCAmelCase : int = """rouge2"""
def __init__( self , snake_case_ , **snake_case_ ) -> Union[str, Any]:
if hparams.sortish_sampler and hparams.gpus > 1:
_a = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(snake_case_ , num_labels=snake_case_ , mode=self.mode , **snake_case_ )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
_a = Path(self.output_dir ) / "metrics.json"
_a = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
_a = 0
_a = defaultdict(snake_case_ )
_a = self.config.model_type
_a = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
_a = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_a = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
_a = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_a = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
_a = get_git_info()["repo_sha"]
_a = hparams.num_workers
_a = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , snake_case_ ):
_a = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_a = self.decoder_start_token_id
_a = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
_a = False
_a = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_a = self.hparams.eval_max_gen_length
else:
_a = self.model.config.max_length
_a = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __lowerCAmelCase ( self , snake_case_ ) -> Dict[str, List[str]]:
_a = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(snake_case_ , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
_a = True
return readable_batch
def __lowerCAmelCase ( self , snake_case_ , **snake_case_ ) -> List[str]:
return self.model(snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
_a = self.tokenizer.batch_decode(
snake_case_ , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
return lmap(str.strip , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
_a = self.tokenizer.pad_token_id
_a , _a = batch["input_ids"], batch["attention_mask"]
_a = batch["labels"]
if isinstance(self.model , snake_case_ ):
_a = self.model._shift_right(snake_case_ )
else:
_a = shift_tokens_right(snake_case_ , snake_case_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_a = decoder_input_ids
self.save_readable_batch(snake_case_ )
_a = self(snake_case_ , attention_mask=snake_case_ , decoder_input_ids=snake_case_ , use_cache=snake_case_ )
_a = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_a = nn.CrossEntropyLoss(ignore_index=snake_case_ )
assert lm_logits.shape[-1] == self.vocab_size
_a = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
_a = nn.functional.log_softmax(snake_case_ , dim=-1 )
_a , _a = label_smoothed_nll_loss(
snake_case_ , snake_case_ , self.hparams.label_smoothing , ignore_index=snake_case_ )
return (loss,)
@property
def __lowerCAmelCase ( self ) -> int:
return self.tokenizer.pad_token_id
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Dict:
_a = self._step(snake_case_ )
_a = dict(zip(self.loss_names , snake_case_ ) )
# tokens per batch
_a = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
_a = batch["input_ids"].shape[0]
_a = batch["input_ids"].eq(self.pad ).sum()
_a = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Dict:
return self._generative_step(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_="val" ) -> Dict:
self.step_count += 1
_a = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_a = losses["loss"]
_a = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
_a = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_a = torch.tensor(snake_case_ ).type_as(snake_case_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(snake_case_ )
_a = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
_a = self.step_count
self.metrics[prefix].append(snake_case_ ) # callback writes this to self.metrics_save_path
_a = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Dict:
return calculate_rouge(snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> dict:
_a = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_a = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=snake_case_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
_a = (time.time() - ta) / batch["input_ids"].shape[0]
_a = self.ids_to_clean_text(snake_case_ )
_a = self.ids_to_clean_text(batch["labels"] )
_a = self._step(snake_case_ )
_a = dict(zip(self.loss_names , snake_case_ ) )
_a = self.calc_generative_metrics(snake_case_ , snake_case_ )
_a = np.mean(lmap(snake_case_ , snake_case_ ) )
base_metrics.update(gen_time=snake_case_ , gen_len=snake_case_ , preds=snake_case_ , target=snake_case_ , **snake_case_ )
return base_metrics
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]:
return self._generative_step(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> int:
return self.validation_epoch_end(snake_case_ , prefix="test" )
def __lowerCAmelCase ( self , snake_case_ ) -> SeqaSeqDataset:
_a = self.n_obs[type_path]
_a = self.target_lens[type_path]
_a = self.dataset_class(
self.tokenizer , type_path=snake_case_ , n_obs=snake_case_ , max_target_length=snake_case_ , **self.dataset_kwargs , )
return dataset
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ = False ) -> DataLoader:
_a = self.get_dataset(snake_case_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_a = dataset.make_sortish_sampler(snake_case_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
snake_case_ , batch_size=snake_case_ , collate_fn=dataset.collate_fn , shuffle=snake_case_ , num_workers=self.num_workers , sampler=snake_case_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_a = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
snake_case_ , batch_sampler=snake_case_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
snake_case_ , batch_size=snake_case_ , collate_fn=dataset.collate_fn , shuffle=snake_case_ , num_workers=self.num_workers , sampler=snake_case_ , )
def __lowerCAmelCase ( self ) -> DataLoader:
_a = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=snake_case_ )
return dataloader
def __lowerCAmelCase ( self ) -> DataLoader:
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def __lowerCAmelCase ( self ) -> DataLoader:
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def __lowerCAmelCase ( snake_case_ , snake_case_ ) -> List[str]:
BaseTransformer.add_model_specific_args(snake_case_ , snake_case_ )
add_generic_args(snake_case_ , snake_case_ )
parser.add_argument(
"--max_source_length" , default=1_0_2_4 , type=snake_case_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
        parser.add_argument(
            "--max_target_length" , default=5_6 , type=snake_case_ , help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--val_max_target_length" , default=1_4_2 , type=snake_case_ , help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--test_max_target_length" , default=1_4_2 , type=snake_case_ , help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
parser.add_argument("--freeze_encoder" , action="store_true" )
parser.add_argument("--freeze_embeds" , action="store_true" )
parser.add_argument("--sortish_sampler" , action="store_true" , default=snake_case_ )
parser.add_argument("--overwrite_output_dir" , action="store_true" , default=snake_case_ )
parser.add_argument("--max_tokens_per_batch" , type=snake_case_ , default=snake_case_ )
parser.add_argument("--logger_name" , type=snake_case_ , choices=["default", "wandb", "wandb_shared"] , default="default" )
parser.add_argument("--n_train" , type=snake_case_ , default=-1 , required=snake_case_ , help="# examples. -1 means use all." )
parser.add_argument("--n_val" , type=snake_case_ , default=5_0_0 , required=snake_case_ , help="# examples. -1 means use all." )
parser.add_argument("--n_test" , type=snake_case_ , default=-1 , required=snake_case_ , help="# examples. -1 means use all." )
parser.add_argument(
"--task" , type=snake_case_ , default="summarization" , required=snake_case_ , help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" , type=snake_case_ , default=0.0 , required=snake_case_ )
parser.add_argument("--src_lang" , type=snake_case_ , default="" , required=snake_case_ )
parser.add_argument("--tgt_lang" , type=snake_case_ , default="" , required=snake_case_ )
parser.add_argument("--eval_beams" , type=snake_case_ , default=snake_case_ , required=snake_case_ )
parser.add_argument(
"--val_metric" , type=snake_case_ , default=snake_case_ , required=snake_case_ , choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" , type=snake_case_ , default=snake_case_ , help="never generate more than n tokens" )
parser.add_argument("--save_top_k" , type=snake_case_ , default=1 , required=snake_case_ , help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" , type=snake_case_ , default=-1 , required=snake_case_ , help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) , )
return parser
class A ( a ):
__UpperCAmelCase : str = """translation"""
__UpperCAmelCase : str = ["""loss"""]
__UpperCAmelCase : List[Any] = ["""bleu"""]
__UpperCAmelCase : Dict = """bleu"""
def __init__( self , snake_case_ , **snake_case_ ) -> int:
super().__init__(snake_case_ , **snake_case_ )
_a = hparams.src_lang
_a = hparams.tgt_lang
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> dict:
return calculate_bleu(snake_case_ , snake_case_ )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : List[Any]=None ):
Path(args.output_dir ).mkdir(exist_ok=lowerCamelCase__ )
check_output_dir(lowerCamelCase__, expected_items=3 )
if model is None:
if "summarization" in args.task:
_a = SummarizationModule(lowerCamelCase__ )
else:
_a = TranslationModule(lowerCamelCase__ )
_a = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
_a = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
_a = os.environ.get("WANDB_PROJECT", lowerCamelCase__ )
_a = WandbLogger(name=model.output_dir.name, project=lowerCamelCase__ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
_a = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
_a = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
else:
_a = False
_a = args.val_metric == "loss"
_a = generic_train(
lowerCamelCase__, lowerCamelCase__, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
args.output_dir, model.val_metric, args.save_top_k, lowerCamelCase__ ), early_stopping_callback=lowerCamelCase__, logger=lowerCamelCase__, )
pickle_save(model.hparams, model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
_a = ""
_a = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt" ), recursive=lowerCamelCase__ ) )
if checkpoints:
_a = checkpoints[-1]
_a = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
__snake_case : List[Any] = pl.Trainer.add_argparse_args(parser)
__snake_case : Optional[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__snake_case : List[str] = parser.parse_args()
main(args)
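# Invocation sketch (paths and hyperparameters are illustrative; the flags map
# onto the argparse options registered above plus the generic and Trainer args,
# which are assumed to include --model_name_or_path, --do_train and --do_predict):
#
#   python finetune.py \
#       --model_name_or_path t5-small --data_dir ./cnn_dm --output_dir ./out \
#       --do_train --do_predict --gpus 1 --max_source_length 512 --n_val 500
#
# data_dir is expected to hold the train/val/test source and target text files
# consumed by SeqaSeqDataset above.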
| 691 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing
return x.sum()
def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing
return i + 1
@dataclass
class A :
__UpperCAmelCase : int
__UpperCAmelCase : str
class A ( a ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
class A :
__UpperCAmelCase : Optional[int] = """bar"""
_a = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ):
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
_a = {F'''{i}''': i for i in range(lowerCamelCase__ )}
_a = map_nested(lambda lowerCamelCase__ : x + 10, lowerCamelCase__, num_proc=lowerCamelCase__, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A ( a ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
_a = layers.Dense(2 )
def gen_random_output():
_a = tf.random.uniform((1, 3) )
return model(snake_case_ ).numpy()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
_a = torch.nn.Linear(3 , 2 )
_a = torch.rand(1 , 3 )
return model(snake_case_ ).detach().numpy()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a = gen_random_output()
with temp_seed(4_2 ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def _lowercase ( lowerCamelCase__ : Any ):
_a = NestedDataStructure(lowerCamelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ):
_a = NestedDataStructure(lowerCamelCase__ ).flatten()
assert output == expected_output
def _lowercase ( ):
_a = A(x=1, y="foobar" )
_a = {"x": 1, "y": "foobar"}
assert asdict(lowerCamelCase__ ) == expected_output
_a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
_a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(lowerCamelCase__ ) == expected_output
with pytest.raises(lowerCamelCase__ ):
asdict([1, A(x=10, y="foo" )] )
def _lowercase ( lowerCamelCase__ : str ):
return text.split()
def _lowercase ( lowerCamelCase__ : List[Any] ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowercase ( ):
with Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_a = []
for yield_time, content in iflatmap_unordered(
lowerCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(lowerCamelCase__ )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(lowerCamelCase__ ) == 4
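# Minimal map_nested sketch mirroring the assertions above: the function is
# applied to every leaf of arbitrarily nested dicts/lists. A lambda is fine
# single-process; with num_proc the callable must be picklable (see the
# assertRaises in the test class).
def _map_nested_sketch():
    assert map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"1": 3}}) == {"a": [2, 3], "b": {"1": 4}}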
| 691 | 1 |
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int] ):
assert isinstance(lowerCamelCase__, lowerCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True] )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Tuple, lowerCamelCase__ : int, lowerCamelCase__ : Tuple ):
_a = tmp_path / "cache"
_a = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a = SqlDatasetReader(
"dataset", "sqlite:///" + sqlite_path, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__ ).read()
_check_sql_dataset(lowerCamelCase__, lowerCamelCase__ )
@require_sqlalchemy
@pytest.mark.parametrize(
"features", [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
], )
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : str, lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path / "cache"
_a = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_a = features.copy() if features else default_expected_features
_a = (
Features({feature: Value(lowerCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_a = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=lowerCamelCase__, cache_dir=lowerCamelCase__ ).read()
_check_sql_dataset(lowerCamelCase__, lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Tuple ):
with contextlib.closing(sqlitea.connect(lowerCamelCase__ ) ) as con:
_a = con.cursor()
cur.execute("SELECT * FROM dataset" )
for row in cur:
yield row
@require_sqlalchemy
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : List[str], lowerCamelCase__ : str ):
_a = tmp_path / "cache"
_a = os.path.join(lowerCamelCase__, "tmp.sql" )
_a = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=lowerCamelCase__ ).read()
SqlDatasetWriter(lowerCamelCase__, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1 ).write()
_a = iter_sql_file(lowerCamelCase__ )
_a = iter_sql_file(lowerCamelCase__ )
for rowa, rowa in zip(lowerCamelCase__, lowerCamelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : Optional[Any], lowerCamelCase__ : List[str] ):
_a = tmp_path / "cache"
_a = os.path.join(lowerCamelCase__, "tmp.sql" )
_a = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=lowerCamelCase__ ).read()
SqlDatasetWriter(lowerCamelCase__, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2 ).write()
_a = iter_sql_file(lowerCamelCase__ )
_a = iter_sql_file(lowerCamelCase__ )
for rowa, rowa in zip(lowerCamelCase__, lowerCamelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : str, lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path / "cache"
_a = os.path.join(lowerCamelCase__, "tmp.sql" )
_a = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=lowerCamelCase__ ).read()
with pytest.raises(lowerCamelCase__ ):
SqlDatasetWriter(lowerCamelCase__, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0 ).write()
| 691 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a )
class A ( a ):
__UpperCAmelCase : Dict = """rag"""
__UpperCAmelCase : Dict = True
def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]:
super().__init__(
bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_a = kwargs.pop("question_encoder" )
_a = question_encoder_config.pop("model_type" )
_a = kwargs.pop("generator" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = reduce_loss
_a = label_smoothing
_a = exclude_bos_score
_a = do_marginalize
_a = title_sep
_a = doc_sep
_a = n_docs
_a = max_combined_length
_a = dataset
_a = dataset_split
_a = index_name
_a = retrieval_vector_size
_a = retrieval_batch_size
_a = passages_path
_a = index_path
_a = use_dummy_dataset
_a = output_retrieved
_a = do_deduplication
_a = use_cache
if self.forced_eos_token_id is None:
_a = getattr(self.generator , "forced_eos_token_id" , snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = copy.deepcopy(self.__dict__ )
_a = self.question_encoder.to_dict()
_a = self.generator.to_dict()
_a = self.__class__.model_type
return output
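# Construction sketch using the upstream transformers names (the class and
# classmethod above appear under rewritten identifiers; upstream they are
# `RagConfig` and `from_question_encoder_generator_configs`). Checkpoints are
# illustrative.
def _rag_config_sketch():
    from transformers import AutoConfig, RagConfig

    question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator = AutoConfig.from_pretrained("facebook/bart-large")
    return RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)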
| 691 | 1 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Dict = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
__snake_case : Optional[int] = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__snake_case : Optional[int] = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__snake_case : str = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
__snake_case : Any = OrderedDict(
[
        # Model for Image-classification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
__snake_case : Tuple = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
__snake_case : int = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
__snake_case : Any = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
__snake_case : Tuple = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
__snake_case : Dict = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
__snake_case : Optional[int] = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
__snake_case : List[str] = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
__snake_case : Optional[Any] = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
__snake_case : Union[str, Any] = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
__snake_case : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__snake_case : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__snake_case : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__snake_case : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__snake_case : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__snake_case : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__snake_case : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__snake_case : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__snake_case : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__snake_case : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__snake_case : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__snake_case : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__snake_case : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__snake_case : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Union[str, Any] = FLAX_MODEL_MAPPING
__snake_case : Union[str, Any] = auto_class_update(FlaxAutoModel)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__snake_case : Optional[int] = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Optional[int] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__snake_case : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Union[str, Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__snake_case : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : List[str] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__snake_case : Optional[int] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : List[Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__snake_case : List[str] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Optional[int] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__snake_case : Dict = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : List[Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__snake_case : Union[str, Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__snake_case : str = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : str = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__snake_case : Tuple = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Optional[Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__snake_case : Any = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__snake_case : List[str] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__snake_case : Union[str, Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
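# Usage sketch for the generated auto classes. The class name below follows the
# upstream API (locally the classes above carry rewritten identifiers); the
# checkpoint is illustrative and assumed to ship Flax weights, and
# `from_pretrained` is inherited from _BaseAutoModelClass.
def _flax_auto_sketch():
    return FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")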
| 691 |
'''simple docstring'''
class A :
def __init__( self ) -> List[str]:
_a = 0
_a = 0
_a = {}
def __lowerCAmelCase ( self , snake_case_ ) -> int:
if vertex not in self.adjacency:
_a = {}
self.num_vertices += 1
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
self.add_vertex(snake_case_ )
self.add_vertex(snake_case_ )
if head == tail:
return
_a = weight
_a = weight
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case_ ) ):
_a = list(edges[i] )
edges.sort(key=lambda snake_case_ : e[2] )
for i in range(len(snake_case_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_a = edges[i][2] + 1
for edge in edges:
_a , _a , _a = edge
_a = weight
_a = weight
def __str__( self ) -> Optional[int]:
_a = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_a = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowerCAmelCase ( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any:
_a = Graph()
if vertices is None:
_a = []
if edges is None:
_a = []
for vertex in vertices:
g.add_vertex(snake_case_ )
for edge in edges:
g.add_edge(*snake_case_ )
return g
class A :
def __init__( self ) -> Optional[int]:
_a = {}
_a = {}
def __len__( self ) -> List[Any]:
return len(self.parent )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
if item in self.parent:
return self.find(snake_case_ )
_a = item
_a = 0
return item
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(snake_case_ )
if item != self.parent[item]:
_a = self.find(self.parent[item] )
return self.parent[item]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]:
_a = self.find(snake_case_ )
_a = self.find(snake_case_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_a = roota
return roota
if self.rank[roota] < self.rank[roota]:
_a = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_a = roota
return roota
return None
@staticmethod
def __lowerCAmelCase ( snake_case_ ) -> Tuple:
_a = graph.num_vertices
_a = Graph.UnionFind()
_a = []
while num_components > 1:
_a = {}
for vertex in graph.get_vertices():
_a = -1
_a = graph.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for edge in edges:
_a , _a , _a = edge
_a = union_find.find(snake_case_ )
_a = union_find.find(snake_case_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_a , _a , _a = cheap_edge[vertex]
if union_find.find(snake_case_ ) != union_find.find(snake_case_ ):
union_find.union(snake_case_ , snake_case_ )
mst_edges.append(cheap_edge[vertex] )
_a = num_components - 1
_a = Graph.build(edges=snake_case_ )
return mst
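# Borůvka usage sketch with the upstream names (`Graph`, `Graph.build` and the
# static MST method, upstream `boruvka_mst`; the classes above carry rewritten
# identifiers). Edges are (tail, head, weight) triples, matching get_edges().
def _boruvka_sketch():
    g = Graph.build(
        vertices=[1, 2, 3, 4],
        edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (1, 4, 3)],
    )
    print(Graph.boruvka_mst(g))  # prints "head -> tail == weight" lines of the tree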
| 691 | 1 |
'''simple docstring'''
__snake_case : Union[str, Any] = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
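# Sketch of how such a pin table is typically consumed (mirrors the deps_table
# pattern in transformers/diffusers; the helper below is illustrative, not part
# of this module).
def deps_list(deps, *pkgs):
    # map package names to their pinned requirement strings,
    # e.g. deps_list(deps, "torch", "numpy") -> ["torch>=1.4", "numpy"]
    return [deps[p] for p in pkgs]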
| 691 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case : Tuple = "\\n Text data.\n Second line of data."
__snake_case : int = "file"
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_a = bytes(lowerCamelCase__, "utf-8" )
with zstd.open(lowerCamelCase__, "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture
def _lowercase ( lowerCamelCase__ : int ):
with open(os.path.join(tmpfs.local_root_dir, lowerCamelCase__ ), "w" ) as f:
f.write(lowerCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
_a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_a = input_paths[compression_format]
_a = tmp_path / "cache"
_a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ )
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
with open(lowerCamelCase__ ) as f:
_a = f.read()
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = "custom_cache"
_a = "custom_extracted_dir"
_a = tmp_path / "custom_extracted_path"
if default_extracted:
_a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) )
_a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a = xz_file
_a = (
DownloadConfig(extract_compressed_file=lowerCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ )
)
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
# absolute path
_a = str(Path(lowerCamelCase__ ).resolve() )
assert cached_path(lowerCamelCase__ ) == text_file
# relative path
_a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCamelCase__ ) == text_file
def _lowercase ( lowerCamelCase__ : Dict ):
# absolute path
_a = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
# relative path
_a = "./__missing_file__.txt"
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
| 691 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A ( a ):
__UpperCAmelCase : Union[str, Any] = ["""image_processor""", """tokenizer"""]
__UpperCAmelCase : Optional[int] = """CLIPImageProcessor"""
__UpperCAmelCase : Optional[int] = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ) -> Any:
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , snake_case_ , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(snake_case_ , snake_case_ )
def __call__( self , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ ) -> Optional[int]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_a = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if images is not None:
_a = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> str:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def __lowerCAmelCase ( self ) -> Dict:
_a = self.tokenizer.model_input_names
_a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
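# Framework-free sketch of the processor pattern implemented above: dispatch to
# a tokenizer and/or an image processor and merge the results into one mapping.
# The callables are illustrative stand-ins, not the real CLIP / XLM-R components.
def _processor_call_sketch(text=None, images=None, tokenize=None, extract=None):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images. Both cannot be none.")
    encoding = dict(tokenize(text)) if text is not None else {}
    if images is not None:
        encoding.update(extract(images))
    return encoding
# e.g. _processor_call_sketch("a cat", tokenize=lambda t: {"input_ids": [[1, 2]]})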
| 691 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__snake_case : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = {}
state_dict.pop("pixel_mean", lowerCamelCase__ )
state_dict.pop("pixel_std", lowerCamelCase__ )
_a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a = key.replace(lowerCamelCase__, lowerCamelCase__ )
if re.match(lowerCamelCase__, lowerCamelCase__ ):
_a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) )
if layer_nb == 0:
_a = key.replace("layers.0", "proj_in" )
elif layer_nb == 1:
_a = key.replace("layers.1", "layers.0" )
elif layer_nb == 2:
_a = key.replace("layers.2", "proj_out" )
_a = value
_a = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ):
_a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_a = SamConfig()
elif "sam_vit_l" in model_name:
_a = SamVisionConfig(
hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
elif "sam_vit_h" in model_name:
_a = SamVisionConfig(
hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
_a = torch.load(lowerCamelCase__, map_location="cpu" )
_a = replace_keys(lowerCamelCase__ )
_a = SamImageProcessor()
_a = SamProcessor(image_processor=lowerCamelCase__ )
_a = SamModel(lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
_a = hf_model.to("cuda" )
_a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" )
_a = [[[400, 650]]]
_a = [[1]]
_a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
_a = ((75, 275, 1_725, 850),)
_a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
_a = [[[400, 650], [800, 650]]]
_a = [[1, 1]]
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
__snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__snake_case : str = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
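# Standalone sketch of the renaming trick in replace_keys above: substring
# remapping plus a regex that rewrites numbered hypernetwork-MLP layers. The
# toy key below is illustrative, not a real SAM parameter name.
def _rename_sketch(state_dict: dict) -> dict:
    mapping = {"image_encoder": "vision_encoder"}
    pattern = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    out = {}
    for key, value in state_dict.items():
        for old, new in mapping.items():
            key = key.replace(old, new)
        match = re.match(pattern, key)
        if match and int(match.group(2)) == 0:
            key = key.replace("layers.0", "proj_in")
        out[key] = value
    return out
# _rename_sketch({"image_encoder.output_hypernetworks_mlps.3.layers.0.w": 1})
# -> {"vision_encoder.output_hypernetworks_mlps.3.proj_in.w": 1}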
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def _lowercase ( lowerCamelCase__ : Callable[[int | float], int | float], lowerCamelCase__ : int | float, lowerCamelCase__ : int | float, lowerCamelCase__ : int = 100, ):
_a = x_start
_a = fnc(lowerCamelCase__ )
_a = 0.0
for _ in range(lowerCamelCase__ ):
# Approximates curve as a sequence of linear lines and sums their length
_a = (x_end - x_start) / steps + xa
_a = fnc(lowerCamelCase__ )
length += math.hypot(xa - xa, fxa - fxa )
# Increment step
_a = xa
_a = fxa
return length
if __name__ == "__main__":
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
__snake_case : Union[str, Any] = 10
while i <= 10_0000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
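# Sanity check of the approximation above (which the demo calls line_length):
# for the straight line f(x) = 2x on [0, 3] the exact arc length is
# 3 * sqrt(5) ~= 6.7082, and the piecewise-linear sum recovers it for any step count.
def _line_length_check() -> None:
    approx = line_length(lambda x: 2 * x, 0, 3, 100)
    assert abs(approx - 3 * math.hypot(1, 2)) < 1e-9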
| 691 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict=0.9_99, lowerCamelCase__ : Union[str, Any]="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase__ : List[Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase__ : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_a = []
for i in range(lowerCamelCase__ ):
_a = i / num_diffusion_timesteps
_a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ), lowerCamelCase__ ) )
return torch.tensor(lowerCamelCase__, dtype=torch.floataa )
class A ( a , a ):
__UpperCAmelCase : int = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Optional[int] = 2
@register_to_config
def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]:
if trained_betas is not None:
_a = torch.tensor(snake_case_ , dtype=torch.floataa )
elif beta_schedule == "linear":
_a = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a = betas_for_alpha_bar(snake_case_ )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_a = 1.0 - self.betas
_a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case_ , snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Dict:
if schedule_timesteps is None:
_a = self.timesteps
_a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a = 1 if len(snake_case_ ) > 1 else 0
else:
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
_a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
_a = self.index_for_timestep(snake_case_ )
if self.state_in_first_order:
_a = self.sigmas[step_index]
else:
_a = self.sigmas_interpol[step_index]
_a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Union[str, Any]:
_a = num_inference_steps
_a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a = torch.from_numpy(np.log(snake_case_ ) ).to(snake_case_ )
_a = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ )
_a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a = torch.from_numpy(snake_case_ ).to(device=snake_case_ )
# interpolate sigmas
_a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(snake_case_ ).startswith("mps" ):
# mps does not support float64
_a = torch.from_numpy(snake_case_ ).to(snake_case_ , dtype=torch.floataa )
else:
_a = torch.from_numpy(snake_case_ ).to(snake_case_ )
# interpolate timesteps
_a = self.sigma_to_t(snake_case_ ).to(snake_case_ , dtype=timesteps.dtype )
_a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_a = torch.cat([timesteps[:1], interleaved_timesteps] )
_a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a = defaultdict(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
# get log sigma
_a = sigma.log()
# get distribution
_a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_a = low_idx + 1
_a = self.log_sigmas[low_idx]
_a = self.log_sigmas[high_idx]
# interpolate sigmas
_a = (low - log_sigma) / (low - high)
_a = w.clamp(0 , 1 )
# transform interpolation to time range
_a = (1 - w) * low_idx + w * high_idx
_a = t.view(sigma.shape )
return t
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return self.sample is None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]:
_a = self.index_for_timestep(snake_case_ )
# advance index counter by 1
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a = self.sigmas[step_index]
_a = self.sigmas_interpol[step_index + 1]
_a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_a = self.sigmas[step_index - 1]
_a = self.sigmas_interpol[step_index]
_a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a = 0
_a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a = sigma_interpol - sigma_hat
# store for 2nd order step
_a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_a = sigma_next - sigma_hat
_a = self.sample
_a = None
_a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ):
# mps does not support float64
_a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_a = self.timesteps.to(original_samples.device )
_a = timesteps.to(original_samples.device )
_a = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps]
_a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a = sigma.unsqueeze(-1 )
_a = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> str:
return self.config.num_train_timesteps
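# Pure-Python sketch of the squaredcos_cap_v2 ("cosine") schedule computed by
# betas_for_alpha_bar above, with the torch dependency stripped out.
def _cosine_betas_sketch(num_steps: int, max_beta: float = 0.999) -> list:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]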
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__snake_case : int = TypeVar("T")
__snake_case : List[Any] = TypeVar("U")
class A ( Generic[T, U] ):
def __init__( self , snake_case_ , snake_case_ ) -> Any:
_a = key
_a = val
_a = None
_a = None
def __repr__( self ) -> str:
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class A ( Generic[T, U] ):
def __init__( self ) -> None:
_a = DoubleLinkedListNode(snake_case_ , snake_case_ )
_a = DoubleLinkedListNode(snake_case_ , snake_case_ )
_a , _a = self.rear, self.head
def __repr__( self ) -> str:
_a = ["DoubleLinkedList"]
_a = self.head
while node.next is not None:
rep.append(str(snake_case_ ) )
_a = node.next
rep.append(str(self.rear ) )
return ",\n ".join(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> None:
_a = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_a = node
_a = previous
_a = node
_a = self.rear
def __lowerCAmelCase ( self , snake_case_ ) -> DoubleLinkedListNode[T, U] | None:
if node.prev is None or node.next is None:
return None
_a = node.next
_a = node.prev
_a = None
_a = None
return node
class A ( Generic[T, U] ):
__UpperCAmelCase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , snake_case_ ) -> Any:
_a = DoubleLinkedList()
_a = capacity
_a = 0
_a = 0
_a = 0
_a = {}
def __repr__( self ) -> str:
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self , snake_case_ ) -> bool:
return key in self.cache
def __lowerCAmelCase ( self , snake_case_ ) -> U | None:
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_a = self.cache[key]
_a = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(snake_case_ )
return node.val
self.miss += 1
return None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> None:
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_a = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(snake_case_ ) is not None
                ) # node guaranteed to be in the list
del self.cache[first_node.key]
self.num_keys -= 1
_a = DoubleLinkedListNode(snake_case_ , snake_case_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_a = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_a = value
self.list.add(snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ = 1_2_8 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
def cache_decorator_inner(snake_case_ ) -> Callable[..., U]:
def cache_decorator_wrapper(*snake_case_ ) -> U:
if func not in cls.decorator_function_to_instance_map:
_a = LRUCache(snake_case_ )
_a = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_a = func(*snake_case_ )
cls.decorator_function_to_instance_map[func].put(args[0] , snake_case_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(snake_case_ , "cache_info" , snake_case_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
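# Compact cross-check of the LRU policy above using collections.OrderedDict,
# which provides the same most-recently-used bookkeeping in a few lines.
from collections import OrderedDict
class MiniLRU:
    """Illustrative sketch, not the class under test above."""
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()
    def get(self, key):
        if key not in self.store:
            return None
        self.store.move_to_end(key)  # mark as most recently used
        return self.store[key]
    def put(self, key, value) -> None:
        if key in self.store:
            self.store.move_to_end(key)
        self.store[key] = value
        if len(self.store) > self.capacity:
            self.store.popitem(last=False)  # evict the least recently used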
| 691 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index], lowerCamelCase__, lowerCamelCase__ ):
# Color current vertex
_a = i
# Validate coloring
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, index + 1 ):
return True
# Backtrack
_a = -1
return False
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int ):
_a = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, 0 ):
return colored_vertices
return []
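# Standalone cross-check of the backtracking above on a triangle graph:
# three mutually adjacent vertices need 3 colors, and 2 colors must fail.
def _color_sketch(graph: list, max_colors: int, colors: list, index: int = 0) -> bool:
    if index == len(graph):
        return True
    for c in range(max_colors):
        if all(not (adj and colors[j] == c) for j, adj in enumerate(graph[index])):
            colors[index] = c
            if _color_sketch(graph, max_colors, colors, index + 1):
                return True
            colors[index] = -1
    return False
_triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
assert _color_sketch(_triangle, 3, [-1] * 3) is True
assert _color_sketch(_triangle, 2, [-1] * 3) is False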
| 691 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : str = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class A ( a , a ):
__UpperCAmelCase : List[str] = """swin"""
__UpperCAmelCase : Any = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , snake_case_=2_2_4 , snake_case_=4 , snake_case_=3 , snake_case_=9_6 , snake_case_=[2, 2, 6, 2] , snake_case_=[3, 6, 1_2, 2_4] , snake_case_=7 , snake_case_=4.0 , snake_case_=True , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_="gelu" , snake_case_=False , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=3_2 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
_a = image_size
_a = patch_size
_a = num_channels
_a = embed_dim
_a = depths
_a = len(snake_case_ )
_a = num_heads
_a = window_size
_a = mlp_ratio
_a = qkv_bias
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = drop_path_rate
_a = hidden_act
_a = use_absolute_embeddings
_a = layer_norm_eps
_a = initializer_range
_a = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_a = int(embed_dim * 2 ** (len(snake_case_ ) - 1) )
_a = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(snake_case_ ) + 1 )]
_a , _a = get_aligned_output_features_output_indices(
out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
class A ( a ):
__UpperCAmelCase : Tuple = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-4
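# Quick check of the derived hidden size above: with embed_dim=96 and
# depths=[2, 2, 6, 2], the channel count doubles at each of the four stages,
# so the final stage width is 96 * 2 ** 3 = 768.
_swin_embed_dim, _swin_depths = 96, [2, 2, 6, 2]
assert [_swin_embed_dim * 2**i for i in range(len(_swin_depths))] == [96, 192, 384, 768]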
| 691 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class A :
def __init__( self , snake_case_ ) -> Optional[int]:
_a = str(id_ )
_a = None
_a = None
_a = []
_a = {} # {vertex:distance}
def __lt__( self , snake_case_ ) -> Optional[Any]:
return self.key < other.key
def __repr__( self ) -> Union[str, Any]:
return self.id
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
self.neighbors.append(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
_a = weight
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : str ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1], lowerCamelCase__ )
graph[b - 1].add_edge(graph[a - 1], lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
_a = []
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = graph[:]
while q:
_a = min(lowerCamelCase__ )
q.remove(lowerCamelCase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
for i in range(1, len(lowerCamelCase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = list(lowerCamelCase__ )
hq.heapify(lowerCamelCase__ )
while h:
_a = hq.heappop(lowerCamelCase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
hq.heapify(lowerCamelCase__ )
for i in range(1, len(lowerCamelCase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _lowercase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
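# Standalone sketch of the same algorithm on a plain adjacency dict with heapq,
# which avoids re-heapifying the whole queue after every key update.
def _prim_sketch(adj: dict, root):
    import heapq
    visited = {root}
    frontier = [(w, root, v) for v, w in adj[root].items()]
    heapq.heapify(frontier)
    mst = []
    while frontier:
        w, u, v = heapq.heappop(frontier)
        if v in visited:
            continue
        visited.add(v)
        mst.append((u, v, w))
        for nxt, weight in adj[v].items():
            if nxt not in visited:
                heapq.heappush(frontier, (weight, v, nxt))
    return mst
# _prim_sketch({"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}}, "a")
# -> [("a", "b", 1), ("b", "c", 2)]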
| 691 | 1 |
'''simple docstring'''
import math
class A :
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> int:
_a = 0.0
_a = 0.0
for i in range(len(snake_case_ ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        # the winner is the cluster whose weight vector is closer to the sample
        return 0 if da < db else 1
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> list[list[int | float]]:
for i in range(len(snake_case_ ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def _lowercase ( ):
# Training Examples ( m, n )
_a = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_a = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_a = SelfOrganizingMap()
_a = 3
_a = 0.5
for _ in range(lowerCamelCase__ ):
for j in range(len(lowerCamelCase__ ) ):
# training sample
_a = training_samples[j]
# Compute the winning vector
_a = self_organizing_map.get_winner(lowerCamelCase__, lowerCamelCase__ )
# Update the winning vector
_a = self_organizing_map.update(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
# classify test sample
_a = [0, 0, 0, 1]
_a = self_organizing_map.get_winner(lowerCamelCase__, lowerCamelCase__ )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
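# Numeric illustration of the competitive-learning update applied above:
# the winning weight moves a fraction alpha toward the sample, w <- w + alpha * (x - w).
_w, _x, _alpha = 0.2, 1.0, 0.5
assert abs((_w + _alpha * (_x - _w)) - 0.6) < 1e-12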
| 691 |
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class A :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ) -> str: # noqa: B008
_a = multiplier
_a = increment
_a = modulo
_a = seed
def __lowerCAmelCase ( self ) -> str:
_a = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
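# Reproducibility sketch: two generators built from the same seed and the same
# classic Numerical Recipes constants used below emit identical streams.
def _lcg_stream_sketch(seed: int, n: int = 5) -> list:
    multiplier, increment, modulo = 166_4525, 10_1390_4223, 2 << 31
    out = []
    for _ in range(n):
        seed = (multiplier * seed + increment) % modulo
        out.append(seed)
    return out
assert _lcg_stream_sketch(42) == _lcg_stream_sketch(42)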
if __name__ == "__main__":
# Show the LCG in action.
__snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 691 | 1 |
'''simple docstring'''
class A : # Public class to implement a graph
def __init__( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
_a = row
_a = col
_a = graph
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
# Checking all 8 elements surrounding nth element
_a = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
_a = [-1, 0, 1, -1, 1, -1, 0, 1]
_a = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , snake_case_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , snake_case_ )
def __lowerCAmelCase ( self ) -> int: # And finally, count all islands.
_a = [[False for j in range(self.COL )] for i in range(self.ROW )]
_a = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(snake_case_ , snake_case_ , snake_case_ )
count += 1
return count
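# Standalone cross-check of the island count with an iterative flood fill; the
# classic 5x5 grid below contains 5 islands under the same 8-way connectivity.
def _count_islands_sketch(grid: list) -> int:
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]
    count = 0
    for si in range(rows):
        for sj in range(cols):
            if grid[si][sj] and not seen[si][sj]:
                count += 1
                seen[si][sj] = True
                stack = [(si, sj)]
                while stack:
                    i, j = stack.pop()
                    for di in (-1, 0, 1):
                        for dj in (-1, 0, 1):
                            ni, nj = i + di, j + dj
                            if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] and not seen[ni][nj]:
                                seen[ni][nj] = True
                                stack.append((ni, nj))
    return count
assert _count_islands_sketch(
    [[1, 1, 0, 0, 0], [0, 1, 0, 0, 1], [1, 0, 0, 1, 1], [0, 0, 0, 0, 0], [1, 0, 1, 0, 1]]
) == 5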
| 691 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger("transformers.models.encodec")
__snake_case : Tuple = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
__snake_case : int = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
__snake_case : Optional[int] = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
__snake_case : Tuple = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
__snake_case : int = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
__snake_case : Union[str, Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case : Tuple = []
__snake_case : Optional[int] = []
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ):
for attribute in key.split("." ):
_a = getattr(lowerCamelCase__, lowerCamelCase__ )
if weight_type is not None:
_a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
elif weight_type == "weight_ih_l0":
_a = value
elif weight_type == "weight_hh_l0":
_a = value
elif weight_type == "bias_ih_l0":
_a = value
elif weight_type == "bias_hh_l0":
_a = value
elif weight_type == "weight_ih_l1":
_a = value
elif weight_type == "weight_hh_l1":
_a = value
elif weight_type == "bias_ih_l1":
_a = value
elif weight_type == "bias_hh_l1":
_a = value
else:
_a = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ):
_a = []
if model_name == "encodec_24khz" or "encodec_32khz":
_a = MAPPING_24K
elif model_name == "encodec_48khz":
_a = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__, lowerCamelCase__ ):
logger.info(F'''{name} was ignored''' )
continue
_a = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
_a = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
_a = True
if "*" in mapped_key:
_a = name.split(lowerCamelCase__ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCamelCase__ )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "weight_ih_l0" in name:
_a = "weight_ih_l0"
elif "weight_hh_l0" in name:
_a = "weight_hh_l0"
elif "bias_ih_l0" in name:
_a = "bias_ih_l0"
elif "bias_hh_l0" in name:
_a = "bias_hh_l0"
elif "weight_ih_l1" in name:
_a = "weight_ih_l1"
elif "weight_hh_l1" in name:
_a = "weight_hh_l1"
elif "bias_ih_l1" in name:
_a = "bias_ih_l1"
elif "bias_hh_l1" in name:
_a = "bias_hh_l1"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
elif "running_mean" in name:
_a = "running_mean"
elif "running_var" in name:
_a = "running_var"
elif "num_batches_tracked" in name:
_a = "num_batches_tracked"
else:
_a = None
set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ):
if config_path is not None:
_a = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
_a = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_a = [8, 5, 4, 4]
_a = [2.2]
_a = 64
_a = 32_000
_a = 2_048
_a = False
_a = False
_a = False
elif model_name == "encodec_48khz":
_a = [8, 5, 4, 2]
_a = [3.0, 6.0, 12.0, 24.0]
_a = 48_000
_a = 2
_a = False
_a = "time_group_norm"
_a = True
_a = 1.0
_a = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_a = EncodecModel(lowerCamelCase__ )
_a = EncodecFeatureExtractor(
feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
feature_extractor.save_pretrained(lowerCamelCase__ )
_a = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_a = original_checkpoint["best_state"]
recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__snake_case : List[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
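# Small sketch of the ".*." wildcard convention used by the mappings above and
# consumed in the weight-loading loop: a mapping key is split into a prefix and
# a suffix, and a checkpoint tensor name matches when it contains both.
def _wildcard_match_sketch(name: str, key: str) -> bool:
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name
assert _wildcard_match_sketch(
    "quantizer.vq.layers.3._codebook.embed", "quantizer.vq.layers.*._codebook.embed"
)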
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ : float, lowerCamelCase__ : float, lowerCamelCase__ : float, ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
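# Worked example of the mass-action law n * p = n_i**2 that the function above
# rearranges: with electron_conc = 25 and intrinsic_conc = 10, the missing
# hole concentration is 10**2 / 25 = 4.0.
assert 10**2 / 25 == 4.0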
| 691 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : int = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__snake_case : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class A ( datasets.BuilderConfig ):
__UpperCAmelCase : int = 10000
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[datasets.Features] = None
class A ( datasets.ArrowBasedBuilder ):
__UpperCAmelCase : Union[str, Any] = ParquetConfig
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return datasets.DatasetInfo(features=self.config.features )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_a = dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case_ , (str, list, tuple) ):
_a = data_files
if isinstance(snake_case_ , snake_case_ ):
_a = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a = [dl_manager.iter_files(snake_case_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_a = []
for split_name, files in data_files.items():
if isinstance(snake_case_ , snake_case_ ):
_a = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a = [dl_manager.iter_files(snake_case_ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(snake_case_ ):
with open(snake_case_ , "rb" ) as f:
_a = datasets.Features.from_arrow_schema(pq.read_schema(snake_case_ ) )
break
splits.append(datasets.SplitGenerator(name=snake_case_ , gen_kwargs={"files": files} ) )
return splits
def __lowerCAmelCase ( self , snake_case_ ) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_a = table_cast(snake_case_ , self.info.features.arrow_schema )
return pa_table
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
_a = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case_ ) ):
with open(snake_case_ , "rb" ) as f:
_a = pq.ParquetFile(snake_case_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_a = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'''{file_idx}_{batch_idx}''', self._cast_table(snake_case_ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(snake_case_ )}: {e}''' )
raise
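# Minimal round-trip sketch of the batched read performed in the generator
# above: write a tiny table, then stream it back with ParquetFile.iter_batches.
def _parquet_roundtrip_sketch(path: str) -> int:
    table = pa.table({"idx": [1, 2, 3], "text": ["a", "b", "c"]})
    pq.write_table(table, path)
    num_rows = 0
    for batch in pq.ParquetFile(path).iter_batches(batch_size=2):
        num_rows += pa.Table.from_batches([batch]).num_rows
    return num_rows  # 3 for the example table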
| 691 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=a ):
__UpperCAmelCase : int = ["""torch""", """scipy"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
| 691 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(a ) , """Tatoeba directory does not exist.""" )
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> Any:
_a = tempfile.mkdtemp()
return TatoebaConverter(save_dir=snake_case_ )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
self.resolver.convert_models(["heb-eng"] )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
_a , _a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=snake_case_ )
assert mmeta["long_pair"] == "heb-eng"
| 691 |
'''simple docstring'''
__snake_case : Dict = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 691 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : str ):
if n_term == "":
return []
_a = []
for temp in range(int(lowerCamelCase__ ) ):
series.append(F'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
__snake_case : int = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
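# The series above is symbolic; the matching numeric partial sum is
# H_n = sum(1/k for k = 1..n), e.g. H_4 = 1 + 1/2 + 1/3 + 1/4 = 25/12 ~= 2.0833.
def _harmonic_sum_sketch(n: int) -> float:
    return sum(1 / k for k in range(1, n + 1))
assert abs(_harmonic_sum_sketch(4) - 25 / 12) < 1e-12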
| 691 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ProphetNetTokenizer
__UpperCAmelCase : Optional[Any] = False
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> int:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
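# Illustrative sketch (not library code): the greedy longest-match-first
# WordPiece algorithm that the WordpieceTokenizer tests above exercise,
# shown standalone with pieces from the fixture vocabulary in setUp.
def wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:  # shrink the window until a known piece matches
            sub = word[start:end] if start == 0 else "##" + word[start:end]
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk]  # no piece matched: the whole word maps to [UNK]
        pieces.append(cur)
        start = end
    return pieces
assert wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert wordpiece("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]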
| 691 | 1 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def _lowercase ( lowerCamelCase__ : List[str] ):
def wrapper(*lowerCamelCase__ : Tuple, **lowerCamelCase__ : Tuple ):
_a = timeit.default_timer()
_a = func(*lowerCamelCase__, **lowerCamelCase__ )
_a = timeit.default_timer() - starttime
return delta
_a = func.__name__
return wrapper
def _lowercase ( lowerCamelCase__ : dict, lowerCamelCase__ : int=100, lowerCamelCase__ : Optional[Any]=None ):
_a = []
_a = seq_shapes or {}
for i in range(lowerCamelCase__ ):
_a = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCamelCase__, _ArrayXD ):
_a = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCamelCase__, datasets.Value ):
if v.dtype == "string":
_a = "The small grey turtle was surprisingly fast when challenged."
else:
_a = np.random.randint(10, size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCamelCase__, datasets.Sequence ):
while isinstance(lowerCamelCase__, datasets.Sequence ):
_a = v.feature
_a = seq_shapes[k]
_a = np.random.rand(*lowerCamelCase__ ).astype(v.dtype )
_a = data
dummy_data.append((i, example) )
return dummy_data
def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : Optional[Any]=100, lowerCamelCase__ : Optional[Any]=None ):
_a = generate_examples(lowerCamelCase__, num_examples=lowerCamelCase__, seq_shapes=lowerCamelCase__ )
with ArrowWriter(features=lowerCamelCase__, path=lowerCamelCase__ ) as writer:
for key, record in dummy_data:
_a = features.encode_example(lowerCamelCase__ )
writer.write(lowerCamelCase__ )
_a , _a = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' )
_a = datasets.Dataset.from_file(filename=lowerCamelCase__, info=datasets.DatasetInfo(features=lowerCamelCase__ ) )
return dataset
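# Illustrative usage (function and file names here are assumptions; the helpers
# above are anonymized in this dump): a self-contained mini-benchmark that times
# an ArrowWriter run over synthetic examples, mirroring the flow above.
import tempfile
def bench_write(num_examples=100):
    features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
    examples = [{"text": "hello world", "score": float(i)} for i in range(num_examples)]
    with tempfile.TemporaryDirectory() as tmp:
        start = timeit.default_timer()
        with ArrowWriter(features=features, path=tmp + "/bench.arrow") as writer:
            for example in examples:
                writer.write(features.encode_example(example))
            writer.finalize()
        return timeit.default_timer() - start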
| 691 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
_a = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
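# Worked example (standalone sketch) of the metric step above: eval_pred is a
# (logits, labels) pair and accuracy is taken over the argmax predictions.
def _accuracy_demo():
    logits = np.array([[0.1, 2.0, -1.0], [1.5, 0.2, 0.3], [0.0, 0.1, 3.0]])
    labels = np.array([1, 0, 2])
    predictions = np.argmax(logits, axis=1)
    assert (predictions == labels).mean() == 1.0  # same value "accuracy" would report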
| 691 | 1 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def __lowerCAmelCase ( *snake_case_ , **snake_case_ ) -> str:
pass
def _lowercase ( lowerCamelCase__ : Image ):
_a = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def _lowercase ( lowerCamelCase__ : Image ):
_a = np.array(lowerCamelCase__ )
_a = npimg.shape
return {"hash": hashimage(lowerCamelCase__ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__UpperCAmelCase : Any = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> int:
_a = MaskGenerationPipeline(model=snake_case_ , image_processor=snake_case_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> str:
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def __lowerCAmelCase ( self ) -> Any:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
_a = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=2_5_6 )
# Shortening by hashing
_a = []
for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(snake_case_ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_967},
{"mask": {"hash": "453c7844bd", "shape": (4_8_0, 6_4_0)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_909},
{"mask": {"hash": "64033ddc3f", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_879},
{"mask": {"hash": "801064ff79", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_834},
{"mask": {"hash": "6172f276ef", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_716},
{"mask": {"hash": "b49e60e084", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_612},
{"mask": {"hash": "a811e775fd", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_552},
{"mask": {"hash": "9d8257e080", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_532},
{"mask": {"hash": "32de6454a8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_516},
{"mask": {"hash": "af3d4af2c8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_499},
{"mask": {"hash": "3c6db475fb", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_483},
{"mask": {"hash": "c290813fb9", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_464},
{"mask": {"hash": "b6f0b8f606", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_408},
{"mask": {"hash": "efb6cab859", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_335},
{"mask": {"hash": "1ff2eafb30", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_326},
{"mask": {"hash": "788b798e24", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_262},
{"mask": {"hash": "abea804f0e", "shape": (4_8_0, 6_4_0)}, "scores": 0.8_999},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_8_0, 6_4_0)}, "scores": 0.8_986},
{"mask": {"hash": "cd24047c8a", "shape": (4_8_0, 6_4_0)}, "scores": 0.8_984},
{"mask": {"hash": "6943e6bcbd", "shape": (4_8_0, 6_4_0)}, "scores": 0.8_873},
{"mask": {"hash": "b5f47c9191", "shape": (4_8_0, 6_4_0)}, "scores": 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = "facebook/sam-vit-huge"
_a = pipeline("mask-generation" , model=snake_case_ )
_a = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=2_5_6 )
# Shortening by hashing
_a = []
for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(snake_case_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_210},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_053},
] , )
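# Standalone sketch of the fixture trick used above: huge boolean masks are
# reduced to a short, stable content hash plus their shape (sha256 here; the
# anonymized helper above may use a different digest).
def _mask_fixture_demo():
    mask = np.arange(480 * 640).reshape(480, 640) % 7 == 0
    digest = hashlib.sha256(mask.tobytes()).hexdigest()[:10]
    return {"hash": digest, "shape": mask.shape}  # compact and comparable in asserts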
| 691 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
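# Side note (illustrative): the card template above doubles braces inside the
# f-string so literal { } survive formatting, as in the BibTeX block. A tiny
# standalone demonstration:
year = 2020
entry = f"@misc{{kasai{year}deep, year={{{year}}}}}"
assert entry == "@misc{kasai2020deep, year={2020}}"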
| 691 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
__snake_case : int = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict, lowerCamelCase__ : int, lowerCamelCase__ : List[str]=None ):
# Initialise PyTorch model
_a = XLNetConfig.from_json_file(lowerCamelCase__ )
_a = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
_a = finetuning_task
_a = GLUE_TASKS_NUM_LABELS[finetuning_task]
_a = XLNetForSequenceClassification(lowerCamelCase__ )
elif "squad" in finetuning_task:
_a = finetuning_task
_a = XLNetForQuestionAnswering(lowerCamelCase__ )
else:
_a = XLNetLMHeadModel(lowerCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
# Save pytorch-model
_a = os.path.join(lowerCamelCase__, lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, lowerCamelCase__ )
print(F'''Save PyTorch model to {os.path.abspath(lowerCamelCase__ )}''' )
torch.save(model.state_dict(), lowerCamelCase__ )
print(F'''Save configuration file to {os.path.abspath(lowerCamelCase__ )}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
__snake_case : Any = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
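# Standalone sketch of the head-selection logic above (task names and return
# values are illustrative): GLUE tasks get a classification head sized from the
# table, SQuAD-style tasks get a QA head, anything else falls back to the LM head.
_NUM_LABELS = {"cola": 2, "mnli": 3, "sst-2": 2}  # subset of the table above
def pick_head(finetuning_task):
    task = finetuning_task.lower() if finetuning_task is not None else ""
    if task in _NUM_LABELS:
        return "sequence-classification", _NUM_LABELS[task]
    if "squad" in task:
        return "question-answering", None
    return "lm-head", None
assert pick_head("SST-2") == ("sequence-classification", 2)
assert pick_head("squad2") == ("question-answering", None)
assert pick_head(None) == ("lm-head", None)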
| 691 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
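# Standalone sketch of the special-token layout produced by the two methods
# above (BART/MVP style); token ids here are illustrative placeholders.
# single: <s> ids </s>        pair: <s> ids_a </s> </s> ids_b </s>
def _layout_demo(bos=0, eos=2):
    ids_a, ids_b = [10, 11], [20]
    single = [bos] + ids_a + [eos]
    pair = single + [eos] + ids_b + [eos]
    assert single == [0, 10, 11, 2]
    assert pair == [0, 10, 11, 2, 2, 20, 2]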
| 691 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : List[str] = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class A ( a ):
def __init__( self , snake_case_=None , snake_case_=None , *snake_case_ , **snake_case_ ) -> str:
super().__init__(*snake_case_ , **snake_case_ )
if config is None:
assert isinstance(self.model , snake_case_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_a = self.model.config
else:
_a = config
_a = data_args
_a = self.config.tgt_vocab_size if isinstance(self.config , snake_case_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
" padding.." )
if self.args.label_smoothing == 0:
_a = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_a = label_smoothed_nll_loss
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
if self.optimizer is None:
_a = ["bias", "LayerNorm.weight"]
_a = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
_a = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_a = Adafactor
_a = {"scale_parameter": False, "relative_step": False}
else:
_a = AdamW
_a = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
_a = self.args.learning_rate
if self.sharded_ddp:
_a = OSS(
params=snake_case_ , optim=snake_case_ , **snake_case_ , )
else:
_a = optimizer_cls(snake_case_ , **snake_case_ )
if self.lr_scheduler is None:
_a = self._get_lr_scheduler(snake_case_ )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def __lowerCAmelCase ( self , snake_case_ ) -> Union[str, Any]:
_a = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_a = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_a = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_a = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=snake_case_ )
return scheduler
def __lowerCAmelCase ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_a = model(**snake_case_ , use_cache=snake_case_ )[0]
_a = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_a , _a = model(**snake_case_ , labels=snake_case_ , use_cache=snake_case_ )[:2]
else:
# compute label smoothed loss
_a = model(**snake_case_ , use_cache=snake_case_ )[0]
_a = torch.nn.functional.log_softmax(snake_case_ , dim=-1 )
_a , _a = self.loss_fn(snake_case_ , snake_case_ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]:
_a = inputs.pop("labels" )
_a , _a = self._compute_loss(snake_case_ , snake_case_ , snake_case_ )
return loss
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_a = self._prepare_inputs(snake_case_ )
_a = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_a = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **snake_case_ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_a = self._pad_tensors_to_max_len(snake_case_ , gen_kwargs["max_length"] )
_a = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
_a , _a = self._compute_loss(snake_case_ , snake_case_ , snake_case_ )
_a = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_a = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_a = self._pad_tensors_to_max_len(snake_case_ , gen_kwargs["max_length"] )
return (loss, logits, labels)
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[Any]:
        # If PAD token is not defined, at least the EOS token has to be defined
_a = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
F''' padded to `max_length`={max_length}''' )
_a = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_a = tensor
return padded_tensor
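# A common formulation of the label_smoothed_nll_loss imported dynamically
# above, modelled on the fairseq version; treat this as a sketch rather than
# the exact utility shipped with the examples.
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None):
    # lprobs: (..., vocab) log-probabilities; target: (...) gold token ids
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1))
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:  # zero out positions holding padding
        pad_mask = target.unsqueeze(-1).eq(ignore_index)
        nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
        smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)  # spread epsilon mass uniformly over the vocab
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss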
| 691 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__snake_case : Optional[Any] = 12_8022
__snake_case : List[str] = 12_8028
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MaMaaaTokenizer
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
_a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_a = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertEqual(snake_case_ , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
_a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_a = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer.get_vocab()
self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
# fmt: off
_a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
_a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(snake_case_ )
_a = MaMaaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
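# Standalone sketch of what shift_tokens_right (imported at the top of this
# test file) does for M2M100-style models, following the common transformers
# pattern; treat it as illustrative: prepend the decoder start token, drop the
# last label token, and replace any -100 padding with the pad id.
def _shift_tokens_right_demo():
    import torch
    labels = torch.tensor([[128028, 7, 8, 2]])  # fr_XX ... </s>
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()
    shifted[:, 0] = 2  # M2M100 decoding starts from </s>
    shifted.masked_fill_(shifted == -100, 1)  # pad_token_id = 1
    assert shifted.tolist() == [[2, 128028, 7, 8]]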
| 691 | 1 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__snake_case : Optional[int] = False
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : List[str] = "ybelkada/fonts"
def _lowercase ( ):
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"Pix2StructImageProcessor. Please upgrade torch." )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
requires_backends(lowerCamelCase__, ["torch"] )
_check_torch_version()
_a = image_tensor.unsqueeze(0 )
_a = torch.nn.functional.unfold(lowerCamelCase__, (patch_height, patch_width), stride=(patch_height, patch_width) )
_a = patches.reshape(image_tensor.size(0 ), image_tensor.size(1 ), lowerCamelCase__, lowerCamelCase__, -1 )
_a = patches.permute(0, 4, 2, 3, 1 ).reshape(
image_tensor.size(2 ) // patch_height, image_tensor.size(3 ) // patch_width, image_tensor.size(1 ) * patch_height * patch_width, )
return patches.unsqueeze(0 )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : int = 36, lowerCamelCase__ : str = "black", lowerCamelCase__ : str = "white", lowerCamelCase__ : int = 5, lowerCamelCase__ : int = 5, lowerCamelCase__ : int = 5, lowerCamelCase__ : int = 5, lowerCamelCase__ : Optional[bytes] = None, lowerCamelCase__ : Optional[str] = None, ):
requires_backends(lowerCamelCase__, "vision" )
# Add new lines so that each line is no more than 80 characters.
_a = textwrap.TextWrapper(width=80 )
_a = wrapper.wrap(text=lowerCamelCase__ )
_a = "\n".join(lowerCamelCase__ )
if font_bytes is not None and font_path is None:
_a = io.BytesIO(lowerCamelCase__ )
elif font_path is not None:
_a = font_path
else:
_a = hf_hub_download(lowerCamelCase__, "Arial.TTF" )
_a = ImageFont.truetype(lowerCamelCase__, encoding="UTF-8", size=lowerCamelCase__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
_a = ImageDraw.Draw(Image.new("RGB", (1, 1), lowerCamelCase__ ) )
_a , _a , _a , _a = temp_draw.textbbox((0, 0), lowerCamelCase__, lowerCamelCase__ )
# Create the actual image with a bit of padding around the text.
_a = text_width + left_padding + right_padding
_a = text_height + top_padding + bottom_padding
_a = Image.new("RGB", (image_width, image_height), lowerCamelCase__ )
_a = ImageDraw.Draw(lowerCamelCase__ )
draw.text(xy=(left_padding, top_padding), text=lowerCamelCase__, fill=lowerCamelCase__, font=lowerCamelCase__ )
return image
def _lowercase ( lowerCamelCase__ : np.ndarray, lowerCamelCase__ : str, **lowerCamelCase__ : Dict ):
requires_backends(lowerCamelCase__, "vision" )
# Convert to PIL image if necessary
_a = to_pil_image(lowerCamelCase__ )
_a = render_text(lowerCamelCase__, **lowerCamelCase__ )
_a = max(header_image.width, image.width )
_a = int(image.height * (new_width / image.width) )
_a = int(header_image.height * (new_width / header_image.width) )
_a = Image.new("RGB", (new_width, new_height + new_header_height), "white" )
new_image.paste(header_image.resize((new_width, new_header_height) ), (0, 0) )
new_image.paste(image.resize((new_width, new_height) ), (0, new_header_height) )
# Convert back to the original framework if necessary
_a = to_numpy_array(lowerCamelCase__ )
if infer_channel_dimension_format(lowerCamelCase__ ) == ChannelDimension.LAST:
_a = to_channel_dimension_format(lowerCamelCase__, ChannelDimension.LAST )
return new_image
class A ( a ):
__UpperCAmelCase : Dict = ["""flattened_patches"""]
def __init__( self , snake_case_ = True , snake_case_ = True , snake_case_ = None , snake_case_ = 2_0_4_8 , snake_case_ = False , **snake_case_ , ) -> None:
super().__init__(**snake_case_ )
_a = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
_a = do_normalize
_a = do_convert_rgb
_a = max_patches
_a = is_vqa
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> np.ndarray:
requires_backends(self.extract_flattened_patches , "torch" )
_check_torch_version()
# convert to torch
_a = to_channel_dimension_format(snake_case_ , ChannelDimension.FIRST )
_a = torch.from_numpy(snake_case_ )
_a , _a = patch_size["height"], patch_size["width"]
_a , _a = get_image_size(snake_case_ )
# maximize scale s.t.
_a = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
_a = max(min(math.floor(scale * image_height / patch_height ) , snake_case_ ) , 1 )
_a = max(min(math.floor(scale * image_width / patch_width ) , snake_case_ ) , 1 )
_a = max(num_feasible_rows * patch_height , 1 )
_a = max(num_feasible_cols * patch_width , 1 )
_a = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=snake_case_ , antialias=snake_case_ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
_a = torch_extract_patches(snake_case_ , snake_case_ , snake_case_ )
_a = patches.shape
_a = patches_shape[1]
_a = patches_shape[2]
_a = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
_a = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
_a = torch.arange(snake_case_ ).reshape([rows, 1] ).repeat(1 , snake_case_ ).reshape([rows * columns, 1] )
_a = torch.arange(snake_case_ ).reshape([1, columns] ).repeat(snake_case_ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
_a = row_ids.to(torch.floataa )
_a = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
_a = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
_a = torch.nn.functional.pad(snake_case_ , [0, 0, 0, max_patches - (rows * columns)] ).float()
_a = to_numpy_array(snake_case_ )
return result
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , **snake_case_ ) -> np.ndarray:
if image.dtype == np.uinta:
_a = image.astype(np.floataa )
# take mean across the whole `image`
_a = np.mean(snake_case_ )
_a = np.std(snake_case_ )
_a = max(snake_case_ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ) -> ImageInput:
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_a = patch_size if patch_size is not None else self.patch_size
_a = max_patches if max_patches is not None else self.max_patches
_a = self.is_vqa
if kwargs.get("data_format" , snake_case_ ) is not None:
raise ValueError("data_format is not an accepted input as the outputs are " )
_a = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_a = [convert_to_rgb(snake_case_ ) for image in images]
# All transformations expect numpy arrays.
_a = [to_numpy_array(snake_case_ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("A header text must be provided for VQA models." )
_a = kwargs.pop("font_bytes" , snake_case_ )
_a = kwargs.pop("font_path" , snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
_a = [header_text] * len(snake_case_ )
_a = [
render_header(snake_case_ , header_text[i] , font_bytes=snake_case_ , font_path=snake_case_ )
for i, image in enumerate(snake_case_ )
]
if do_normalize:
_a = [self.normalize(image=snake_case_ ) for image in images]
# convert to torch tensor and permute
_a = [
self.extract_flattened_patches(image=snake_case_ , max_patches=snake_case_ , patch_size=snake_case_ )
for image in images
]
# create attention mask in numpy
_a = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
_a = BatchFeature(
data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=snake_case_ )
return encoded_outputs
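# Back-of-the-envelope check of the resize math in extract_flattened_patches,
# assuming the class defaults above (16x16 patches, max_patches=2048): the
# scale preserves aspect ratio while keeping rows * columns <= max_patches.
def _feasible_grid(image_height, image_width, patch_h=16, patch_w=16, max_patches=2048):
    scale = math.sqrt(max_patches * (patch_h / image_height) * (patch_w / image_width))
    rows = max(min(math.floor(scale * image_height / patch_h), max_patches), 1)
    cols = max(min(math.floor(scale * image_width / patch_w), max_patches), 1)
    return rows, cols
assert _feasible_grid(480, 640) == (39, 52)  # 39 * 52 = 2028 <= 2048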
| 691 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """wav2vec2"""
def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
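# Standalone sketch tying the property above to the conv stack: each 1-D conv
# layer shrinks the sequence as (length - kernel) // stride + 1, so the default
# kernels/strides turn 1 s of 16 kHz audio into 49 frames (stride product 320).
def _feat_extract_output_length(length, kernels=(10, 3, 3, 3, 3, 2, 2), strides=(5, 2, 2, 2, 2, 2, 2)):
    for kernel, stride in zip(kernels, strides):
        length = (length - kernel) // stride + 1
    return length
assert _feat_extract_output_length(16000) == 49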
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ : dict, lowerCamelCase__ : str ):
_a , _a = set(lowerCamelCase__ ), [start]
while stack:
_a = stack.pop()
explored.add(lowerCamelCase__ )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(lowerCamelCase__ )
return explored
__snake_case : Tuple = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
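# Worked example, assuming the intended iterative DFS that returns the explored
# set: starting from "A" every vertex of G is reachable, so the call above prints
# {"A", "B", "C", "D", "E", "F", "G"} (set ordering may vary). Note that "D"
# lists itself as a neighbour; the `adj not in explored` guard keeps such
# self-loops from re-entering the stack.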
| 691 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):  # set the bit at `position`
    return number | (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):  # clear the bit at `position`
    return number & ~(1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):  # flip the bit at `position`
    return number ^ (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):  # is the bit at `position` set?
    return ((number >> position) & 1) == 1
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):  # read the bit at `position` as 0/1
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
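# Usage sketch for the helpers above, in definition order (set, clear, flip,
# test, read); the values below follow directly from the bitwise expressions:
#
#     0b1010 | (1 << 0)            # -> 0b1011 (11): set bit 0
#     0b1010 & ~(1 << 1)           # -> 0b1000 (8):  clear bit 1
#     0b1010 ^ (1 << 3)            # -> 0b0010 (2):  flip bit 3
#     ((0b1010 >> 1) & 1) == 1     # -> True:        bit 1 is set
#     int((0b1010 & (1 << 2)) != 0)  # -> 0:         bit 2 read as 0/1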
| 691 | 1 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__snake_case : List[str] = datasets.utils.logging.get_logger(__name__)
class A ( folder_based_builder.FolderBasedBuilderConfig ):
__UpperCAmelCase : bool = None
__UpperCAmelCase : bool = None
class A ( folder_based_builder.FolderBasedBuilder ):
__UpperCAmelCase : Any = datasets.Audio()
__UpperCAmelCase : int = """audio"""
__UpperCAmelCase : Union[str, Any] = AudioFolderConfig
__UpperCAmelCase : List[str] # definition at the bottom of the script
__UpperCAmelCase : Optional[int] = AudioClassification(audio_column="""audio""" , label_column="""label""" )
__snake_case : Tuple = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
__snake_case : Union[str, Any] = AUDIO_EXTENSIONS
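# Usage sketch (assumption: the builder above is the datasets `audiofolder`
# loader). Directories laid out as data/train/<label>/<file>.wav can be loaded
# with:
#
#     from datasets import load_dataset
#     ds = load_dataset("audiofolder", data_dir="data")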
| 691 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__snake_case : List[Any] = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any]=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Optional[int]=None ):
_a = True
while ask_again:
_a = input(lowerCamelCase__ )
try:
if default is not None and len(lowerCamelCase__ ) == 0:
return default
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict=[], lowerCamelCase__ : int=None, lowerCamelCase__ : Union[str, Any]=0 ):
_a = BulletMenu(lowerCamelCase__, lowerCamelCase__ )
_a = menu.run(default_choice=lowerCamelCase__ )
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _lowercase ( lowerCamelCase__ : Dict ):
_a = int(lowerCamelCase__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = int(lowerCamelCase__ )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _lowercase ( lowerCamelCase__ : str ):
return {"yes": True, "no": False}[value.lower()]
class A ( argparse.RawDescriptionHelpFormatter ):
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
_a = super()._format_usage(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_a = usage.replace("<command> [<args>] " , "" )
return usage
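# Worked note, kept as comments since the backend list above is bound to an
# obfuscated name in this dump: the dynamo converter maps a menu index into that
# list, so an answer of 2 selects "INDUCTOR" and the helper returns
# DynamoBackend("INDUCTOR").value.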
| 691 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int = 100 ):
_a = set()
_a = 0
_a = n + 1 # maximum limit
for a in range(2, lowerCamelCase__ ):
for b in range(2, lowerCamelCase__ ):
_a = a**b # calculates the current power
collect_powers.add(lowerCamelCase__ ) # adds the result to the set
return len(lowerCamelCase__ )
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 691 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[list] ):
_a = current_set.copy()
for row_index, row in enumerate(lowerCamelCase__ ):
_a = row[0]
for column_index, column in enumerate(lowerCamelCase__ ):
if magnitude == 0:
_a = column
continue
_a = column / magnitude
# Subtract to cancel term
_a = current_set[0]
_a = [first_row]
_a = current_set[1::]
for row in current_set:
_a = []
        # If the first term is 0, the row is already in the form we want, so preserve it
if row[0] == 0:
final_set.append(lowerCamelCase__ )
continue
for column_index in range(len(lowerCamelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a = final_set[0]
_a = []
_a = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a = simplify(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, lowerCamelCase__ )
_a = resultant
return final_set
def _lowercase ( lowerCamelCase__ : list[list] ):
if len(lowerCamelCase__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
_a = len(lowerCamelCase__ ) + 1
if any(len(lowerCamelCase__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(lowerCamelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
_a = equations.copy()
if any(0 in row for row in data_set ):
_a = data_set.copy()
_a = []
for row_index, row in enumerate(lowerCamelCase__ ):
if 0 not in row:
_a = data_set.pop(lowerCamelCase__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0, lowerCamelCase__ )
_a = data_set.copy()
_a = simplify(lowerCamelCase__ )
_a = simplified[::-1]
_a = []
for row in simplified:
_a = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a = row.copy()[: len(lowerCamelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase__ ) == 0:
solutions.append(0 )
continue
_a = temp_row[1::]
_a = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase__ )
_a = []
for item in solutions:
final.append(float(round(lowerCamelCase__, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Tuple = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
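# Worked check, derived from the demo system above: every equation has the form
# x_i + (x_1 + ... + x_5) = 3 + i, so summing all five gives 6s = 30 with
# s = sum(x), hence s = 5 and x_i = (3 + i) - 5. The first call should therefore
# print [-1.0, 0.0, 1.0, 2.0, 3.0]; the second solves 4x = 2 and prints [0.5].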
| 691 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _lowercase ( ):
_a = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores", type=lowerCamelCase__, default=1, help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script", type=lowerCamelCase__, help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
), )
# rest from the training program
parser.add_argument("training_script_args", nargs=lowerCamelCase__ )
return parser.parse_args()
def _lowercase ( ):
_a = parse_args()
# Import training_script as a module.
_a = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_a = script_fpath.stem
_a = importlib.import_module(lowerCamelCase__ )
# Patch sys.argv
_a = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
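# Invocation sketch (hedged; mirrors the usual xla_spawn usage):
#
#     python xla_spawn.py --num_cores 8 my_training_script.py --foo --bar
#
# The launcher imports the target script as a module, appends
# `--tpu_num_cores <n>` to sys.argv, and hands mod._mp_fn to xmp.spawn.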
| 691 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing
return x.sum()
def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing
return i + 1
@dataclass
class A :
__UpperCAmelCase : int
__UpperCAmelCase : str
class A ( a ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
class A :
__UpperCAmelCase : Optional[int] = """bar"""
_a = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ):
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
_a = {F'''{i}''': i for i in range(lowerCamelCase__ )}
_a = map_nested(lambda lowerCamelCase__ : x + 10, lowerCamelCase__, num_proc=lowerCamelCase__, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
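# Reading the parametrization above (inferred from the expected values): the
# nested map only goes parallel once the iterable reaches parallel_min_length
# (16 here). Below that threshold expected_num_proc is 1; above it the pool size
# is capped at min(num_proc, iterable_length).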
class A ( a ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
_a = layers.Dense(2 )
def gen_random_output():
_a = tf.random.uniform((1, 3) )
return model(snake_case_ ).numpy()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
_a = torch.nn.Linear(3 , 2 )
_a = torch.rand(1 , 3 )
return model(snake_case_ ).detach().numpy()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a = gen_random_output()
with temp_seed(4_2 ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def _lowercase ( lowerCamelCase__ : Any ):
_a = NestedDataStructure(lowerCamelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ):
_a = NestedDataStructure(lowerCamelCase__ ).flatten()
assert output == expected_output
def _lowercase ( ):
_a = A(x=1, y="foobar" )
_a = {"x": 1, "y": "foobar"}
assert asdict(lowerCamelCase__ ) == expected_output
_a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
_a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(lowerCamelCase__ ) == expected_output
with pytest.raises(lowerCamelCase__ ):
asdict([1, A(x=10, y="foo" )] )
def _lowercase ( lowerCamelCase__ : str ):
return text.split()
def _lowercase ( lowerCamelCase__ : List[Any] ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowercase ( ):
with Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_a = []
for yield_time, content in iflatmap_unordered(
lowerCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
out.append(lowerCamelCase__ )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(lowerCamelCase__ ) == 4
| 691 | 1 |
'''simple docstring'''
class A :
def __init__( self ) -> List[str]:
_a = 0
_a = 0
_a = {}
def __lowerCAmelCase ( self , snake_case_ ) -> int:
if vertex not in self.adjacency:
_a = {}
self.num_vertices += 1
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
self.add_vertex(snake_case_ )
self.add_vertex(snake_case_ )
if head == tail:
return
_a = weight
_a = weight
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case_ ) ):
_a = list(edges[i] )
edges.sort(key=lambda snake_case_ : e[2] )
for i in range(len(snake_case_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_a = edges[i][2] + 1
for edge in edges:
_a , _a , _a = edge
_a = weight
_a = weight
def __str__( self ) -> Optional[int]:
_a = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_a = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowerCAmelCase ( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any:
_a = Graph()
if vertices is None:
_a = []
if edges is None:
_a = []
for vertex in vertices:
g.add_vertex(snake_case_ )
for edge in edges:
g.add_edge(*snake_case_ )
return g
class A :
def __init__( self ) -> Optional[int]:
_a = {}
_a = {}
def __len__( self ) -> List[Any]:
return len(self.parent )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
if item in self.parent:
return self.find(snake_case_ )
_a = item
_a = 0
return item
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(snake_case_ )
if item != self.parent[item]:
_a = self.find(self.parent[item] )
return self.parent[item]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]:
_a = self.find(snake_case_ )
_a = self.find(snake_case_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_a = roota
return roota
if self.rank[roota] < self.rank[roota]:
_a = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_a = roota
return roota
return None
@staticmethod
def __lowerCAmelCase ( snake_case_ ) -> Tuple:
_a = graph.num_vertices
_a = Graph.UnionFind()
_a = []
while num_components > 1:
_a = {}
for vertex in graph.get_vertices():
_a = -1
_a = graph.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for edge in edges:
_a , _a , _a = edge
_a = union_find.find(snake_case_ )
_a = union_find.find(snake_case_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_a , _a , _a = cheap_edge[vertex]
if union_find.find(snake_case_ ) != union_find.find(snake_case_ ):
union_find.union(snake_case_ , snake_case_ )
mst_edges.append(cheap_edge[vertex] )
_a = num_components - 1
_a = Graph.build(edges=snake_case_ )
return mst
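# Algorithm note (hedged; this is the usual shape of Boruvka's MST): each round
# finds, for every current component, its cheapest outgoing edge, merges the two
# endpoints in the union-find, and repeats until one component remains. The
# component count at least halves per round, giving O(E log V) overall.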
| 691 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a )
class A ( a ):
__UpperCAmelCase : Dict = """rag"""
__UpperCAmelCase : Dict = True
def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]:
super().__init__(
bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_a = kwargs.pop("question_encoder" )
_a = question_encoder_config.pop("model_type" )
_a = kwargs.pop("generator" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = reduce_loss
_a = label_smoothing
_a = exclude_bos_score
_a = do_marginalize
_a = title_sep
_a = doc_sep
_a = n_docs
_a = max_combined_length
_a = dataset
_a = dataset_split
_a = index_name
_a = retrieval_vector_size
_a = retrieval_batch_size
_a = passages_path
_a = index_path
_a = use_dummy_dataset
_a = output_retrieved
_a = do_deduplication
_a = use_cache
if self.forced_eos_token_id is None:
_a = getattr(self.generator , "forced_eos_token_id" , snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = copy.deepcopy(self.__dict__ )
_a = self.question_encoder.to_dict()
_a = self.generator.to_dict()
_a = self.__class__.model_type
return output
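# Construction sketch (assuming the transformers names RagConfig / AutoConfig;
# per the assert in __init__, both sub-configs are required). The classmethod
# above supports building the combined config directly:
#
#     q_cfg = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     g_cfg = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_cfg = RagConfig.from_question_encoder_generator_configs(q_cfg, g_cfg, n_docs=5)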
| 691 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : int = logging.get_logger(__name__)
__snake_case : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__snake_case : List[Any] = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__snake_case : str = {"facebook/blenderbot-3B": 128}
class A ( a ):
__UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : str = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[str] = BlenderbotTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[Any]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Dict:
return token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ ) -> List[int]:
_a = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(snake_case_ )
_a = " ".join(snake_case_ )
_a = self.encode(snake_case_ )
if len(snake_case_ ) > self.model_max_length:
_a = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
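# Behaviour sketch for the conversation builder above: user turns get a leading
# space, all turns are joined with single spaces, build_inputs_with_special_tokens
# appends the EOS id, and anything beyond model_max_length (128 for
# facebook/blenderbot-3B per the map near the top of this cell) is trimmed from
# the left with a warning.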
| 691 |
'''simple docstring'''
class A :
def __init__( self ) -> List[str]:
_a = 0
_a = 0
_a = {}
def __lowerCAmelCase ( self , snake_case_ ) -> int:
if vertex not in self.adjacency:
_a = {}
self.num_vertices += 1
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
self.add_vertex(snake_case_ )
self.add_vertex(snake_case_ )
if head == tail:
return
_a = weight
_a = weight
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case_ ) ):
_a = list(edges[i] )
edges.sort(key=lambda snake_case_ : e[2] )
for i in range(len(snake_case_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_a = edges[i][2] + 1
for edge in edges:
_a , _a , _a = edge
_a = weight
_a = weight
def __str__( self ) -> Optional[int]:
_a = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_a = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowerCAmelCase ( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any:
_a = Graph()
if vertices is None:
_a = []
if edges is None:
_a = []
for vertex in vertices:
g.add_vertex(snake_case_ )
for edge in edges:
g.add_edge(*snake_case_ )
return g
class A :
def __init__( self ) -> Optional[int]:
_a = {}
_a = {}
def __len__( self ) -> List[Any]:
return len(self.parent )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
if item in self.parent:
return self.find(snake_case_ )
_a = item
_a = 0
return item
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(snake_case_ )
if item != self.parent[item]:
_a = self.find(self.parent[item] )
return self.parent[item]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]:
_a = self.find(snake_case_ )
_a = self.find(snake_case_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_a = roota
return roota
if self.rank[roota] < self.rank[roota]:
_a = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_a = roota
return roota
return None
@staticmethod
def __lowerCAmelCase ( snake_case_ ) -> Tuple:
_a = graph.num_vertices
_a = Graph.UnionFind()
_a = []
while num_components > 1:
_a = {}
for vertex in graph.get_vertices():
_a = -1
_a = graph.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for edge in edges:
_a , _a , _a = edge
_a = union_find.find(snake_case_ )
_a = union_find.find(snake_case_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_a , _a , _a = cheap_edge[vertex]
if union_find.find(snake_case_ ) != union_find.find(snake_case_ ):
union_find.union(snake_case_ , snake_case_ )
mst_edges.append(cheap_edge[vertex] )
_a = num_components - 1
_a = Graph.build(edges=snake_case_ )
return mst
| 691 | 1 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "spiece.model"}
__snake_case : Dict = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
__snake_case : List[Any] = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
class A ( a ):
__UpperCAmelCase : Dict = VOCAB_FILES_NAMES
__UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Any = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_="<unk>" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="[SEP]" , snake_case_="[MASK]" , snake_case_="[CLS]" , snake_case_ = None , **snake_case_ , ) -> None:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
    # The mask token behaves like a normal word, i.e. it includes the space before it
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , sep_token=snake_case_ , mask_token=snake_case_ , cls_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
_a = vocab_file
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
return self.sp_model.get_piece_size()
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self , snake_case_ ) -> Dict:
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return self.sp_model.piece_to_id(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
_a = self.sp_model.IdToPiece(snake_case_ )
return token
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
_a = []
_a = ""
_a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
_a = True
_a = []
else:
current_sub_tokens.append(snake_case_ )
_a = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = False , snake_case_ = None , snake_case_ = True , **snake_case_ , ) -> str:
_a = kwargs.pop("use_source_tokenizer" , snake_case_ )
_a = self.convert_ids_to_tokens(snake_case_ , skip_special_tokens=snake_case_ )
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_a = []
_a = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case_ ) )
_a = []
sub_texts.append(snake_case_ )
else:
current_sub_text.append(snake_case_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case_ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
_a = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(snake_case_ ) )
else:
_a = "".join(snake_case_ )
_a = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_a = self.clean_up_tokenization(snake_case_ )
return clean_text
else:
return text
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_a = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a = [self.cls_token_id]
_a = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
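# Layout sketch, following directly from the last three methods above: a single
# sequence encodes as [CLS] A [SEP] with all-zero token_type_ids; a pair encodes
# as [CLS] A [SEP] B [SEP] with the B segment (including its trailing [SEP])
# typed 1, and the special-tokens mask marks exactly the [CLS]/[SEP] slots.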
| 691 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case : Tuple = "\\n Text data.\n Second line of data."
__snake_case : int = "file"
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_a = bytes(lowerCamelCase__, "utf-8" )
with zstd.open(lowerCamelCase__, "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture
def _lowercase ( lowerCamelCase__ : int ):
with open(os.path.join(tmpfs.local_root_dir, lowerCamelCase__ ), "w" ) as f:
f.write(lowerCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
_a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_a = input_paths[compression_format]
_a = tmp_path / "cache"
_a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ )
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
with open(lowerCamelCase__ ) as f:
_a = f.read()
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = "custom_cache"
_a = "custom_extracted_dir"
_a = tmp_path / "custom_extracted_path"
if default_extracted:
_a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) )
_a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a = xz_file
_a = (
DownloadConfig(extract_compressed_file=lowerCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ )
)
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
# absolute path
_a = str(Path(lowerCamelCase__ ).resolve() )
assert cached_path(lowerCamelCase__ ) == text_file
# relative path
_a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCamelCase__ ) == text_file
def _lowercase ( lowerCamelCase__ : Dict ):
# absolute path
_a = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
# relative path
_a = "./__missing_file__.txt"
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
| 691 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class A ( a ):
__UpperCAmelCase : List[str] = """audio-spectrogram-transformer"""
def __init__( self , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=1E-1_2 , snake_case_=1_6 , snake_case_=True , snake_case_=1_0 , snake_case_=1_0 , snake_case_=1_0_2_4 , snake_case_=1_2_8 , **snake_case_ , ) -> str:
super().__init__(**snake_case_ )
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = initializer_range
_a = layer_norm_eps
_a = patch_size
_a = qkv_bias
_a = frequency_stride
_a = time_stride
_a = max_length
_a = num_mel_bins
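# Shape sketch (standard AST patching math, assumed to apply to the defaults
# above): frequency patches = (num_mel_bins - patch_size) // frequency_stride + 1
# = (128 - 16) // 10 + 1 = 12, and time patches = (max_length - patch_size) //
# time_stride + 1 = (1024 - 16) // 10 + 1 = 101, so one spectrogram yields
# 12 * 101 = 1212 patch tokens.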
| 691 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__snake_case : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = {}
state_dict.pop("pixel_mean", lowerCamelCase__ )
state_dict.pop("pixel_std", lowerCamelCase__ )
_a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a = key.replace(lowerCamelCase__, lowerCamelCase__ )
if re.match(lowerCamelCase__, lowerCamelCase__ ):
_a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) )
if layer_nb == 0:
_a = key.replace("layers.0", "proj_in" )
elif layer_nb == 1:
_a = key.replace("layers.1", "layers.0" )
elif layer_nb == 2:
_a = key.replace("layers.2", "proj_out" )
_a = value
_a = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ):
_a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_a = SamConfig()
elif "sam_vit_l" in model_name:
_a = SamVisionConfig(
hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
elif "sam_vit_h" in model_name:
_a = SamVisionConfig(
hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
_a = torch.load(lowerCamelCase__, map_location="cpu" )
_a = replace_keys(lowerCamelCase__ )
_a = SamImageProcessor()
_a = SamProcessor(image_processor=lowerCamelCase__ )
_a = SamModel(lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
_a = hf_model.to("cuda" )
_a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" )
_a = [[[400, 650]]]
_a = [[1]]
_a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
_a = ((75, 275, 1_725, 850),)
_a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
_a = [[[400, 650], [800, 650]]]
_a = [[1, 1]]
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
__snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__snake_case : str = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
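# CLI sketch (flags exactly as registered above; the script filename is assumed):
#
#     python convert_sam_original_to_hf_format.py \
#         --model_name sam_vit_h_4b8939 \
#         --pytorch_dump_folder_path ./sam-vit-huge \
#         --push_to_hub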
| 691 | 1 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def _lowercase ( lowerCamelCase__ : Tuple ):
return x + 2
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Any:
_a = "x = 3"
_a = {}
_a = evaluate(snake_case_ , {} , state=snake_case_ )
assert result == 3
self.assertDictEqual(snake_case_ , {"x": 3} )
_a = "x = y"
_a = {"y": 5}
_a = evaluate(snake_case_ , {} , state=snake_case_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 5, "y": 5} )
def __lowerCAmelCase ( self ) -> str:
_a = "y = add_two(x)"
_a = {"x": 3}
_a = evaluate(snake_case_ , {"add_two": add_two} , state=snake_case_ )
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
_a = evaluate(snake_case_ , {} , state=snake_case_ )
assert result is None
assert "tried to execute add_two" in out.out
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "x = 3"
_a = {}
_a = evaluate(snake_case_ , {} , state=snake_case_ )
assert result == 3
self.assertDictEqual(snake_case_ , {"x": 3} )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "test_dict = {'x': x, 'y': add_two(x)}"
_a = {"x": 3}
_a = evaluate(snake_case_ , {"add_two": add_two} , state=snake_case_ )
self.assertDictEqual(snake_case_ , {"x": 3, "y": 5} )
self.assertDictEqual(snake_case_ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "x = 3\ny = 5"
_a = {}
_a = evaluate(snake_case_ , {} , state=snake_case_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 3, "y": 5} )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "text = f'This is x: {x}.'"
_a = {"x": 3}
_a = evaluate(snake_case_ , {} , state=snake_case_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(snake_case_ , {"x": 3, "text": "This is x: 3."} )
def __lowerCAmelCase ( self ) -> Dict:
_a = "if x <= 3:\n y = 2\nelse:\n y = 5"
_a = {"x": 3}
_a = evaluate(snake_case_ , {} , state=snake_case_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(snake_case_ , {"x": 3, "y": 2} )
_a = {"x": 8}
_a = evaluate(snake_case_ , {} , state=snake_case_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 8, "y": 5} )
def __lowerCAmelCase ( self ) -> Any:
_a = "test_list = [x, add_two(x)]"
_a = {"x": 3}
_a = evaluate(snake_case_ , {"add_two": add_two} , state=snake_case_ )
self.assertListEqual(snake_case_ , [3, 5] )
self.assertDictEqual(snake_case_ , {"x": 3, "test_list": [3, 5]} )
def __lowerCAmelCase ( self ) -> int:
_a = "y = x"
_a = {"x": 3}
_a = evaluate(snake_case_ , {} , state=snake_case_ )
assert result == 3
self.assertDictEqual(snake_case_ , {"x": 3, "y": 3} )
def __lowerCAmelCase ( self ) -> Any:
_a = "test_list = [x, add_two(x)]\ntest_list[1]"
_a = {"x": 3}
_a = evaluate(snake_case_ , {"add_two": add_two} , state=snake_case_ )
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 3, "test_list": [3, 5]} )
_a = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
_a = {"x": 3}
_a = evaluate(snake_case_ , {"add_two": add_two} , state=snake_case_ )
assert result == 5
self.assertDictEqual(snake_case_ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "x = 0\nfor i in range(3):\n x = i"
_a = {}
_a = evaluate(snake_case_ , {"range": range} , state=snake_case_ )
assert result == 2
self.assertDictEqual(snake_case_ , {"x": 2, "i": 2} )
| 691 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict=0.9_99, lowerCamelCase__ : Union[str, Any]="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase__ : List[Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase__ : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_a = []
for i in range(lowerCamelCase__ ):
_a = i / num_diffusion_timesteps
_a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ), lowerCamelCase__ ) )
return torch.tensor(lowerCamelCase__, dtype=torch.floataa )
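# Usage sketch (hypothetical): betas_for_alpha_bar(1000) returns a length-1000 tensor of betas for the
# cosine schedule; __init__ below calls it when beta_schedule="squaredcos_cap_v2".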
class A ( a , a ):
__UpperCAmelCase : int = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Optional[int] = 2
@register_to_config
def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]:
if trained_betas is not None:
_a = torch.tensor(snake_case_ , dtype=torch.floataa )
elif beta_schedule == "linear":
_a = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a = betas_for_alpha_bar(snake_case_ )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_a = 1.0 - self.betas
_a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case_ , snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Dict:
if schedule_timesteps is None:
_a = self.timesteps
_a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a = 1 if len(snake_case_ ) > 1 else 0
else:
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
_a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
_a = self.index_for_timestep(snake_case_ )
if self.state_in_first_order:
_a = self.sigmas[step_index]
else:
_a = self.sigmas_interpol[step_index]
_a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Union[str, Any]:
_a = num_inference_steps
_a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a = torch.from_numpy(np.log(snake_case_ ) ).to(snake_case_ )
_a = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ )
_a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a = torch.from_numpy(snake_case_ ).to(device=snake_case_ )
# interpolate sigmas
_a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(snake_case_ ).startswith("mps" ):
# mps does not support float64
_a = torch.from_numpy(snake_case_ ).to(snake_case_ , dtype=torch.floataa )
else:
_a = torch.from_numpy(snake_case_ ).to(snake_case_ )
# interpolate timesteps
_a = self.sigma_to_t(snake_case_ ).to(snake_case_ , dtype=timesteps.dtype )
_a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_a = torch.cat([timesteps[:1], interleaved_timesteps] )
_a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a = defaultdict(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
# get log sigma
_a = sigma.log()
# get distribution
_a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_a = low_idx + 1
_a = self.log_sigmas[low_idx]
_a = self.log_sigmas[high_idx]
# interpolate sigmas
_a = (low - log_sigma) / (low - high)
_a = w.clamp(0 , 1 )
# transform interpolation to time range
_a = (1 - w) * low_idx + w * high_idx
_a = t.view(sigma.shape )
return t
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return self.sample is None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]:
_a = self.index_for_timestep(snake_case_ )
# advance index counter by 1
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a = self.sigmas[step_index]
_a = self.sigmas_interpol[step_index + 1]
_a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_a = self.sigmas[step_index - 1]
_a = self.sigmas_interpol[step_index]
_a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a = 0
_a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a = sigma_interpol - sigma_hat
# store for 2nd order step
_a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_a = sigma_next - sigma_hat
_a = self.sample
_a = None
_a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ):
# mps does not support float64
_a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_a = self.timesteps.to(original_samples.device )
_a = timesteps.to(original_samples.device )
_a = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps]
_a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a = sigma.unsqueeze(-1 )
_a = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> str:
return self.config.num_train_timesteps
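# Usage sketch (hypothetical; assumes the original diffusers method names scale_model_input/step that
# this file mirrors, which the renamed methods above correspond to):
#   scheduler = <this scheduler class>(num_train_timesteps=1000)
#   scheduler.set_timesteps(50, "cuda")
#   for t in scheduler.timesteps:
#       latent_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(latent_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample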
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__snake_case : str = TypeVar("T")
class A ( Generic[T] ):
def __init__( self , snake_case_ , snake_case_ ) -> None:
_a = None
_a = len(snake_case_ )
_a = [any_type for _ in range(self.N )] + arr
_a = fnc
self.build()
def __lowerCAmelCase ( self ) -> None:
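        # Fill internal nodes bottom-up: node p combines its two children with self.fn.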
for p in range(self.N - 1 , 0 , -1 ):
_a = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> None:
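        # Point update: write v at leaf p, then recompute every ancestor up to the root.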
p += self.N
_a = v
while p > 1:
_a = p // 2
_a = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> T | None: # noqa: E741
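        # Iterative range query over [l, r]: fold boundary nodes into res while shrinking the interval.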
_a , _a = l + self.N, r + self.N
_a = None
while l <= r:
if l % 2 == 1:
_a = self.st[l] if res is None else self.fn(snake_case_ , self.st[l] )
if r % 2 == 0:
_a = self.st[r] if res is None else self.fn(snake_case_ , self.st[r] )
_a , _a = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
__snake_case : Dict = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__snake_case : Optional[int] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__snake_case : int = SegmentTree(test_array, min)
__snake_case : Optional[int] = SegmentTree(test_array, max)
__snake_case : List[str] = SegmentTree(test_array, lambda a, b: a + b)
def _lowercase ( ):
for i in range(len(lowerCamelCase__ ) ):
for j in range(lowerCamelCase__, len(lowerCamelCase__ ) ):
_a = reduce(lowerCamelCase__, test_array[i : j + 1] )
_a = reduce(lowerCamelCase__, test_array[i : j + 1] )
_a = reduce(lambda lowerCamelCase__, lowerCamelCase__ : a + b, test_array[i : j + 1] )
assert min_range == min_segment_tree.query(lowerCamelCase__, lowerCamelCase__ )
assert max_range == max_segment_tree.query(lowerCamelCase__, lowerCamelCase__ )
assert sum_range == sum_segment_tree.query(lowerCamelCase__, lowerCamelCase__ )
test_all_segments()
for index, value in test_updates.items():
__snake_case : Optional[Any] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 691 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index], lowerCamelCase__, lowerCamelCase__ ):
# Color current vertex
_a = i
# Validate coloring
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, index + 1 ):
return True
# Backtrack
_a = -1
return False
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int ):
_a = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, 0 ):
return colored_vertices
return []
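# Usage sketch (hypothetical; assumes the last function above is the original entry point `color`):
#   graph = [[0, 1, 0, 0, 0],
#            [1, 0, 1, 0, 1],
#            [0, 1, 0, 1, 0],
#            [0, 0, 1, 0, 1],
#            [0, 1, 0, 1, 0]]
#   color(graph, 3)  # -> a valid coloring such as [0, 1, 0, 1, 0], or [] if none exists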
| 691 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=1_8 , snake_case_=3_0 , snake_case_=4_0_0 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=[0.48_145_466, 0.4_578_275, 0.40_821_073] , snake_case_=[0.26_862_954, 0.26_130_258, 0.27_577_711] , snake_case_=True , ) -> Optional[Any]:
_a = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
_a = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
_a = parent
_a = batch_size
_a = num_channels
_a = image_size
_a = min_resolution
_a = max_resolution
_a = do_resize
_a = size
_a = do_center_crop
_a = crop_size
_a = do_normalize
_a = image_mean
_a = image_std
_a = do_convert_rgb
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __lowerCAmelCase ( self , snake_case_=False , snake_case_=False , snake_case_=False ) -> List[Any]:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
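        # Build random test images as PIL images by default, or as numpy / torch tensors via the flags.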
if equal_resolution:
_a = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_a = []
for i in range(self.batch_size ):
_a , _a = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_a = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
if torchify:
_a = [torch.from_numpy(snake_case_ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A ( a , unittest.TestCase ):
__UpperCAmelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> str:
_a = ChineseCLIPImageProcessingTester(self , do_center_crop=snake_case_ )
@property
def __lowerCAmelCase ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , "do_resize" ) )
self.assertTrue(hasattr(snake_case_ , "size" ) )
self.assertTrue(hasattr(snake_case_ , "do_center_crop" ) )
self.assertTrue(hasattr(snake_case_ , "center_crop" ) )
self.assertTrue(hasattr(snake_case_ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case_ , "image_mean" ) )
self.assertTrue(hasattr(snake_case_ , "image_std" ) )
self.assertTrue(hasattr(snake_case_ , "do_convert_rgb" ) )
def __lowerCAmelCase ( self ) -> Dict:
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 2_2_4, "width": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> int:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_a = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __lowerCAmelCase ( self ) -> List[Any]:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_a = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __lowerCAmelCase ( self ) -> Dict:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_a = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class A ( a , unittest.TestCase ):
__UpperCAmelCase : Dict = ChineseCLIPImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> int:
_a = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=snake_case_ )
_a = 3
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> str:
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , "do_resize" ) )
self.assertTrue(hasattr(snake_case_ , "size" ) )
self.assertTrue(hasattr(snake_case_ , "do_center_crop" ) )
self.assertTrue(hasattr(snake_case_ , "center_crop" ) )
self.assertTrue(hasattr(snake_case_ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case_ , "image_mean" ) )
self.assertTrue(hasattr(snake_case_ , "image_std" ) )
self.assertTrue(hasattr(snake_case_ , "do_convert_rgb" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
pass
def __lowerCAmelCase ( self ) -> List[str]:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_a = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 691 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class A :
def __init__( self , snake_case_ ) -> Optional[int]:
_a = str(id_ )
_a = None
_a = None
_a = []
_a = {} # {vertex:distance}
def __lt__( self , snake_case_ ) -> Optional[Any]:
return self.key < other.key
def __repr__( self ) -> Union[str, Any]:
return self.id
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
self.neighbors.append(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
_a = weight
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : str ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1], lowerCamelCase__ )
graph[b - 1].add_edge(graph[a - 1], lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
_a = []
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = graph[:]
while q:
_a = min(lowerCamelCase__ )
q.remove(lowerCamelCase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
for i in range(1, len(lowerCamelCase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = list(lowerCamelCase__ )
hq.heapify(lowerCamelCase__ )
while h:
_a = hq.heappop(lowerCamelCase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
hq.heapify(lowerCamelCase__ )
for i in range(1, len(lowerCamelCase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _lowercase ( ):
pass
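# Usage sketch (hypothetical; assumes the original helper names `connect`, `prim` and `prim_heap`,
# with `A` being the vertex class defined above):
#   graph = [A(i) for i in range(1, 4)]
#   connect(graph, 1, 2, 15); connect(graph, 1, 3, 9); connect(graph, 2, 3, 7)
#   prim(graph[:], graph[0])                  # list-based Prim returns the MST edges
#   list(prim_heap(graph, graph[0]))          # heap-based Prim yields MST edges lazily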
if __name__ == "__main__":
import doctest
doctest.testmod()
| 691 | 1 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__snake_case : Tuple = [
"good first issue",
"feature request",
"wip",
]
def _lowercase ( ):
_a = Github(os.environ["GITHUB_TOKEN"] )
_a = g.get_repo("huggingface/accelerate" )
_a = repo.get_issues(state="open" )
for issue in open_issues:
_a = sorted([comment for comment in issue.get_comments()], key=lambda lowerCamelCase__ : i.created_at, reverse=lowerCamelCase__ )
_a = comments[0] if len(lowerCamelCase__ ) > 0 else None
_a = dt.utcnow()
_a = (current_time - issue.updated_at).days
_a = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 691 |
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class A :
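    # Linear congruential generator: next_seed = (multiplier * seed + increment) % modulo.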
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ) -> str: # noqa: B008
_a = multiplier
_a = increment
_a = modulo
_a = seed
def __lowerCAmelCase ( self ) -> str:
_a = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
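    # 1664525 and 1013904223 with modulus 2**32 (2 << 31) are the classic Numerical Recipes LCG parameters.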
while True:
print(lcg.next_number())
| 691 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class A ( a , unittest.TestCase ):
__UpperCAmelCase : str = MvpTokenizer
__UpperCAmelCase : Any = MvpTokenizerFast
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : str = filter_roberta_detectors
def __lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
_a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_a = {"unk_token": "<unk>"}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case_ ) )
def __lowerCAmelCase ( self , **snake_case_ ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , **snake_case_ ) -> int:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self ) -> Tuple:
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
def __lowerCAmelCase ( self ) -> Tuple:
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
def __lowerCAmelCase ( self ) -> str:
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
# Test that special tokens are reset
@require_torch
def __lowerCAmelCase ( self ) -> Dict:
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , snake_case_ )
self.assertIn("attention_mask" , snake_case_ )
self.assertNotIn("labels" , snake_case_ )
self.assertNotIn("decoder_attention_mask" , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=snake_case_ , max_length=3_2 , padding="max_length" , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 1_0_2_4) )
@require_torch
def __lowerCAmelCase ( self ) -> str:
_a = ["A long paragraph for summarization."]
_a = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(snake_case_ , text_target=snake_case_ , return_tensors="pt" )
_a = inputs["input_ids"]
_a = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def __lowerCAmelCase ( self ) -> List[str]:
pass
def __lowerCAmelCase ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_a = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_a = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_a = "A, <mask> AllenNLP sentence."
_a = tokenizer_r.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
_a = tokenizer_p.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't.
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
snake_case_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
snake_case_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 691 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger("transformers.models.encodec")
__snake_case : Tuple = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
__snake_case : int = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
__snake_case : Optional[int] = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
__snake_case : Tuple = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
__snake_case : int = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
__snake_case : Union[str, Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case : Tuple = []
__snake_case : Optional[int] = []
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ):
for attribute in key.split("." ):
_a = getattr(lowerCamelCase__, lowerCamelCase__ )
if weight_type is not None:
_a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
elif weight_type == "weight_ih_l0":
_a = value
elif weight_type == "weight_hh_l0":
_a = value
elif weight_type == "bias_ih_l0":
_a = value
elif weight_type == "bias_hh_l0":
_a = value
elif weight_type == "weight_ih_l1":
_a = value
elif weight_type == "weight_hh_l1":
_a = value
elif weight_type == "bias_ih_l1":
_a = value
elif weight_type == "bias_hh_l1":
_a = value
else:
_a = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ):
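    # Ignore patterns may end in ".*" (prefix match), contain ".*." (prefix-and-suffix match), or be plain substrings.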
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ):
_a = []
if model_name == "encodec_24khz" or "encodec_32khz":
_a = MAPPING_24K
elif model_name == "encodec_48khz":
_a = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__, lowerCamelCase__ ):
logger.info(F'''{name} was ignored''' )
continue
_a = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
_a = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
_a = True
if "*" in mapped_key:
_a = name.split(lowerCamelCase__ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCamelCase__ )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "weight_ih_l0" in name:
_a = "weight_ih_l0"
elif "weight_hh_l0" in name:
_a = "weight_hh_l0"
elif "bias_ih_l0" in name:
_a = "bias_ih_l0"
elif "bias_hh_l0" in name:
_a = "bias_hh_l0"
elif "weight_ih_l1" in name:
_a = "weight_ih_l1"
elif "weight_hh_l1" in name:
_a = "weight_hh_l1"
elif "bias_ih_l1" in name:
_a = "bias_ih_l1"
elif "bias_hh_l1" in name:
_a = "bias_hh_l1"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
elif "running_mean" in name:
_a = "running_mean"
elif "running_var" in name:
_a = "running_var"
elif "num_batches_tracked" in name:
_a = "num_batches_tracked"
else:
_a = None
set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ):
if config_path is not None:
_a = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
_a = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_a = [8, 5, 4, 4]
_a = [2.2]
_a = 64
_a = 32_000
_a = 2_048
_a = False
_a = False
_a = False
elif model_name == "encodec_48khz":
_a = [8, 5, 4, 2]
_a = [3.0, 6.0, 12.0, 24.0]
_a = 48_000
_a = 2
_a = False
_a = "time_group_norm"
_a = True
_a = 1.0
_a = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_a = EncodecModel(lowerCamelCase__ )
_a = EncodecFeatureExtractor(
feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
feature_extractor.save_pretrained(lowerCamelCase__ )
_a = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_a = original_checkpoint["best_state"]
recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__snake_case : List[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
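    # Example invocation (hypothetical local checkpoint path):
    #   python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
    #       --checkpoint_path encodec_24khz-d7cc33bc.th --pytorch_dump_folder_path ./encodec-24khz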
| 691 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : str = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : int = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Union[str, Any] = logging.get_logger(__name__)
def _lowercase ( lowerCamelCase__ : str ):
_a = "huggingface/label-files"
_a = "imagenet-1k-id2label.json"
_a = json.load(open(hf_hub_download(lowerCamelCase__, lowerCamelCase__, repo_type="dataset" ), "r" ) )
_a = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
_a = {v: k for k, v in idalabel.items()}
_a = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_a = BitConfig(
conv_layer=lowerCamelCase__, num_labels=1_000, idalabel=lowerCamelCase__, labelaid=lowerCamelCase__, )
return config
def _lowercase ( lowerCamelCase__ : Tuple ):
if "stem.conv" in name:
_a = name.replace("stem.conv", "bit.embedder.convolution" )
if "blocks" in name:
_a = name.replace("blocks", "layers" )
if "head.fc" in name:
_a = name.replace("head.fc", "classifier.1" )
if name.startswith("norm" ):
_a = "bit." + name
if "bit" not in name and "classifier" not in name:
_a = "bit.encoder." + name
return name
def _lowercase ( ):
_a = "http://images.cocodataset.org/val2017/000000039769.jpg"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Tuple, lowerCamelCase__ : int=False ):
_a = get_config(lowerCamelCase__ )
# load original model from timm
_a = create_model(lowerCamelCase__, pretrained=lowerCamelCase__ )
timm_model.eval()
# load state_dict of original model
_a = timm_model.state_dict()
for key in state_dict.copy().keys():
_a = state_dict.pop(lowerCamelCase__ )
_a = val.squeeze() if "head" in key else val
# load HuggingFace model
_a = BitForImageClassification(lowerCamelCase__ )
model.eval()
model.load_state_dict(lowerCamelCase__ )
# create image processor
_a = create_transform(**resolve_data_config({}, model=lowerCamelCase__ ) )
_a = transform.transforms
_a = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
_a = BitImageProcessor(
do_resize=lowerCamelCase__, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=lowerCamelCase__, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=lowerCamelCase__, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
_a = prepare_img()
_a = transform(lowerCamelCase__ ).unsqueeze(0 )
_a = processor(lowerCamelCase__, return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCamelCase__, lowerCamelCase__ )
# verify logits
with torch.no_grad():
_a = model(lowerCamelCase__ )
_a = outputs.logits
print("Logits:", logits[0, :3] )
print("Predicted class:", model.config.idalabel[logits.argmax(-1 ).item()] )
_a = timm_model(lowerCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase__, outputs.logits, atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(F'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(F'''ybelkada/{model_name}''' )
processor.push_to_hub(F'''ybelkada/{model_name}''' )
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
__snake_case : str = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
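    # Example invocation (hypothetical output path):
    #   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm --pytorch_dump_folder_path ./bit-50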
| 691 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=a ):
__UpperCAmelCase : int = ["""torch""", """scipy"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
| 691 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class A ( a ):
__UpperCAmelCase : "DiagonalGaussianDistribution"
class A ( a , a ):
__UpperCAmelCase : Optional[Any] = True
@register_to_config
def __init__( self , snake_case_ = 3 , snake_case_ = 3 , snake_case_ = ("DownEncoderBlock2D",) , snake_case_ = ("UpDecoderBlock2D",) , snake_case_ = (6_4,) , snake_case_ = 1 , snake_case_ = "silu" , snake_case_ = 4 , snake_case_ = 3_2 , snake_case_ = 3_2 , snake_case_ = 0.18_215 , ) -> Dict:
super().__init__()
# pass init params to Encoder
_a = Encoder(
in_channels=snake_case_ , out_channels=snake_case_ , down_block_types=snake_case_ , block_out_channels=snake_case_ , layers_per_block=snake_case_ , act_fn=snake_case_ , norm_num_groups=snake_case_ , double_z=snake_case_ , )
# pass init params to Decoder
_a = Decoder(
in_channels=snake_case_ , out_channels=snake_case_ , up_block_types=snake_case_ , block_out_channels=snake_case_ , layers_per_block=snake_case_ , norm_num_groups=snake_case_ , act_fn=snake_case_ , )
_a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
_a = nn.Convad(snake_case_ , snake_case_ , 1 )
_a = False
_a = False
# only relevant if vae tiling is enabled
_a = self.config.sample_size
_a = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
_a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_a = 0.25
def __lowerCAmelCase ( self , snake_case_ , snake_case_=False ) -> Any:
if isinstance(snake_case_ , (Encoder, Decoder) ):
_a = value
def __lowerCAmelCase ( self , snake_case_ = True ) -> List[str]:
_a = use_tiling
def __lowerCAmelCase ( self ) -> str:
self.enable_tiling(snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = True
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
_a = {}
def fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ ):
if hasattr(snake_case_ , "set_processor" ):
_a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , snake_case_ , snake_case_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ )
return processors
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
_a = len(self.attn_processors.keys() )
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(snake_case_ )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ ):
if hasattr(snake_case_ , "set_processor" ):
if not isinstance(snake_case_ , snake_case_ ):
module.set_processor(snake_case_ )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , snake_case_ , snake_case_ )
for name, module in self.named_children():
fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ )
def __lowerCAmelCase ( self ) -> int:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(snake_case_ , return_dict=snake_case_ )
if self.use_slicing and x.shape[0] > 1:
            _a = [self.encoder(x_slice ) for x_slice in x.split(1 )]
_a = torch.cat(snake_case_ )
else:
_a = self.encoder(snake_case_ )
_a = self.quant_conv(snake_case_ )
_a = DiagonalGaussianDistribution(snake_case_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(snake_case_ , return_dict=snake_case_ )
_a = self.post_quant_conv(snake_case_ )
_a = self.decoder(snake_case_ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
@apply_forward_hook
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
            _a = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
_a = torch.cat(snake_case_ )
else:
_a = self._decode(snake_case_ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
_a = min(a.shape[2] , b.shape[2] , snake_case_ )
for y in range(snake_case_ ):
_a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
_a = min(a.shape[3] , b.shape[3] , snake_case_ )
for x in range(snake_case_ ):
_a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
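    # Note (added): blend_v and blend_h above linearly cross-fade two tiles over
    # `blend_extent` rows (resp. columns), ramping the weight from tile `a` to tile `b`
    # so that the tiled encode/decode paths below produce seam-free outputs.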
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> AutoencoderKLOutput:
_a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_a = int(self.tile_latent_min_size * self.tile_overlap_factor )
_a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_a = []
for i in range(0 , x.shape[2] , snake_case_ ):
_a = []
for j in range(0 , x.shape[3] , snake_case_ ):
_a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_a = self.encoder(snake_case_ )
_a = self.quant_conv(snake_case_ )
row.append(snake_case_ )
rows.append(snake_case_ )
_a = []
for i, row in enumerate(snake_case_ ):
_a = []
for j, tile in enumerate(snake_case_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_a = self.blend_v(rows[i - 1][j] , snake_case_ , snake_case_ )
if j > 0:
_a = self.blend_h(row[j - 1] , snake_case_ , snake_case_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(snake_case_ , dim=3 ) )
_a = torch.cat(snake_case_ , dim=2 )
_a = DiagonalGaussianDistribution(snake_case_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
_a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_a = int(self.tile_sample_min_size * self.tile_overlap_factor )
_a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_a = []
for i in range(0 , z.shape[2] , snake_case_ ):
_a = []
for j in range(0 , z.shape[3] , snake_case_ ):
_a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_a = self.post_quant_conv(snake_case_ )
_a = self.decoder(snake_case_ )
row.append(snake_case_ )
rows.append(snake_case_ )
_a = []
for i, row in enumerate(snake_case_ ):
_a = []
for j, tile in enumerate(snake_case_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_a = self.blend_v(rows[i - 1][j] , snake_case_ , snake_case_ )
if j > 0:
_a = self.blend_h(row[j - 1] , snake_case_ , snake_case_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(snake_case_ , dim=3 ) )
_a = torch.cat(snake_case_ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = False , snake_case_ = True , snake_case_ = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
_a = sample
_a = self.encode(snake_case_ ).latent_dist
if sample_posterior:
_a = posterior.sample(generator=snake_case_ )
else:
_a = posterior.mode()
_a = self.decode(snake_case_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
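# Hedged usage sketch (added; not part of the original class). It round-trips a random
# image through the public diffusers AutoencoderKL that this file implements; the
# checkpoint name and tensor shape are illustrative assumptions.
if __name__ == "__main__":
    from diffusers import AutoencoderKL
    _vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse" )  # assumed checkpoint
    _vae.enable_tiling()  # exercises the tiled_encode / tiled_decode paths defined above
    _img = torch.randn(1 , 3 , 5_1_2 , 5_1_2 )
    with torch.no_grad():
        _lat = _vae.encode(_img ).latent_dist.sample()
        _rec = _vae.decode(_lat ).sample  # same spatial shape as _img
    print(_rec.shape )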
| 691 |
'''simple docstring'''
__snake_case : Dict = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
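# Hedged usage sketch (added): one way a pin table like the one above is consumed, e.g.
# resolving bare package names to full requirement strings. `deps_for` is a hypothetical
# helper, not part of the original file.
def deps_for(*pkgs ):
    return [__snake_case[p] for p in pkgs]
# deps_for("torch", "timm") == ["torch>=1.9,!=1.12.0", "timm"]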
| 691 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _lowercase ( ):
_a = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores", type=lowerCamelCase__, default=1, help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script", type=lowerCamelCase__, help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
), )
# rest from the training program
parser.add_argument("training_script_args", nargs=lowerCamelCase__ )
return parser.parse_args()
def _lowercase ( ):
_a = parse_args()
# Import training_script as a module.
_a = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_a = script_fpath.stem
_a = importlib.import_module(lowerCamelCase__ )
# Patch sys.argv
_a = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
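# Example invocation (added; the script and arguments after --num_cores are illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# Everything after the training script path is forwarded to it untouched via REMAINDER.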
| 691 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ProphetNetTokenizer
__UpperCAmelCase : Optional[Any] = False
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> int:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 691 | 1 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 691 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
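# Note (added): the callback above re-runs evaluation on the *training* split whenever a
# regular evaluation fires, so train and eval accuracy can be logged side by side each
# epoch; the deepcopy keeps the returned TrainerControl unaffected by the nested call.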
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
        _a = labels.str2int(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 691 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case : int = logging.get_logger(__name__)
__snake_case : int = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class A ( a ):
__UpperCAmelCase : List[Any] = """mobilenet_v1"""
def __init__( self , snake_case_=3 , snake_case_=2_2_4 , snake_case_=1.0 , snake_case_=8 , snake_case_="relu6" , snake_case_=True , snake_case_=0.999 , snake_case_=0.02 , snake_case_=0.001 , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
_a = num_channels
_a = image_size
_a = depth_multiplier
_a = min_depth
_a = hidden_act
_a = tf_padding
_a = classifier_dropout_prob
_a = initializer_range
_a = layer_norm_eps
class A ( a ):
__UpperCAmelCase : int = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-4
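# Hedged sketch (added): how `depth_multiplier` and `min_depth` typically interact when
# scaling MobileNetV1 channel counts. The exact rounding used by the modeling code may
# differ, so treat this as illustrative only.
def _scaled_channels(channels , depth_multiplier=1.0 , min_depth=8 ):
    return max(min_depth , int(channels * depth_multiplier ) )
# _scaled_channels(6_4 , depth_multiplier=0.25 ) == 1_6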
| 691 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ : list[float], lowerCamelCase__ : list[float] ):
_a = sorted(numsa + numsa )
_a , _a = divmod(len(lowerCamelCase__ ), 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
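# Worked example (added): median_of_two_arrays([1, 3], [2]) sorts to [1, 2, 3] and
# returns the middle element 2; median_of_two_arrays([1, 2], [3, 4]) averages the two
# middle elements of [1, 2, 3, 4] to give 2.5.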
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : List[str] = [float(x) for x in input("Enter the elements of first array: ").split()]
__snake_case : Tuple = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 691 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
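# Note (added): like BART, MVP does not use token type ids, which is why the method
# above returns an all-zero mask for both single sequences and sequence pairs.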
| 691 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class A ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , ) -> Optional[Any]:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_attention_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_choices
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_attention_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = True
_a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Tuple = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowerCAmelCase ( self ) -> Tuple:
_a = FlaxBertModelTester(self )
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
_a = FlaxBertModel.from_pretrained("bert-base-cased" )
_a = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case_ )
| 691 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__snake_case : Optional[Any] = 12_8022
__snake_case : List[str] = 12_8028
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MaMaaaTokenizer
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
_a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_a = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertEqual(snake_case_ , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
_a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_a = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer.get_vocab()
self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
# fmt: off
_a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
_a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(snake_case_ )
_a = MaMaaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
| 691 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : str = "▁"
__snake_case : Dict = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
__snake_case : Any = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
__snake_case : List[Any] = {
"facebook/s2t-small-librispeech-asr": 1024,
}
__snake_case : Union[str, Any] = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
__snake_case : Dict = {"mustc": MUSTC_LANGS}
class A ( a ):
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[int] = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase : int = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_="<s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_=False , snake_case_=False , snake_case_=None , snake_case_=None , snake_case_ = None , **snake_case_ , ) -> None:
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , do_upper_case=snake_case_ , do_lower_case=snake_case_ , tgt_lang=snake_case_ , lang_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
_a = do_upper_case
_a = do_lower_case
_a = load_json(snake_case_ )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(snake_case_ , self.sp_model_kwargs )
if lang_codes is not None:
_a = lang_codes
_a = LANGUAGES[lang_codes]
_a = [F'''<lang:{lang}>''' for lang in self.langs]
_a = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
_a = self.lang_tokens
_a = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_a = {}
@property
def __lowerCAmelCase ( self ) -> int:
return len(self.encoder )
@property
def __lowerCAmelCase ( self ) -> str:
return self._tgt_lang
@tgt_lang.setter
def __lowerCAmelCase ( self , snake_case_ ) -> None:
_a = new_tgt_lang
self.set_tgt_lang_special_tokens(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> None:
_a = self.lang_code_to_id[tgt_lang]
_a = [lang_code_id]
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __lowerCAmelCase ( self , snake_case_ ) -> str:
return self.decoder.get(snake_case_ , self.unk_token )
def __lowerCAmelCase ( self , snake_case_ ) -> str:
_a = []
_a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_a = self.sp_model.decode(snake_case_ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_a = []
else:
current_sub_tokens.append(snake_case_ )
_a = self.sp_model.decode(snake_case_ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_a = [1] * len(self.prefix_tokens )
_a = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __lowerCAmelCase ( self ) -> Dict:
_a = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self , snake_case_ ) -> None:
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = Path(snake_case_ )
assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Dict[str, Any] ):
_a = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ )
spm.Load(str(lowerCamelCase__ ) )
return spm
def _lowercase ( lowerCamelCase__ : str ):
with open(lowerCamelCase__, "r" ) as f:
return json.load(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : str ):
with open(lowerCamelCase__, "w" ) as f:
json.dump(lowerCamelCase__, lowerCamelCase__, indent=2 )
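# Hedged usage note (added): save_json and load_json above round-trip the vocab dict,
# e.g. save_json({"<s>": 0, "<pad>": 1}, "vocab.json" ) followed by
# load_json("vocab.json" ); the file name is illustrative.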
| 691 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config ( PretrainedConfig ):
    model_type = "wav2vec2"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul, self.conv_stride, 1 )
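# For reference, the `inputs_to_logits_ratio` property above is just the product of
# the convolutional strides, i.e. the overall downsampling factor between raw audio
# samples and output frames. A standalone sanity check (illustrative, not part of
# the original file):
import functools
import operator

# Default conv_stride is (5, 2, 2, 2, 2, 2, 2): 5 * 2**6 == 320 samples per logit frame.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320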
| 691 | 1 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def zstd_path ( tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8" )
    with zstd.open(path, "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file ( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH ), "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def test_cached_path_extract ( compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file ):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True )
    extracted_path = cached_path(input_path, download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def test_extracted_datasets_path ( default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch ):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename, download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local ( text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local ( tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec ( tmpfs_file ):
    output_path = get_from_cache(F'''tmp://{tmpfs_file}''' )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
| 691 |
'''simple docstring'''
def set_bit ( number : int, position : int ):
    # Set the bit at `position` to 1.
    return number | (1 << position)
def clear_bit ( number : int, position : int ):
    # Force the bit at `position` to 0.
    return number & ~(1 << position)
def flip_bit ( number : int, position : int ):
    # Toggle the bit at `position`.
    return number ^ (1 << position)
def is_bit_set ( number : int, position : int ):
    # True if the bit at `position` is 1.
    return ((number >> position) & 1) == 1
def get_bit ( number : int, position : int ):
    # Return the bit at `position` as 0 or 1.
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
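# A few concrete checks for the helpers above (illustrative additions; 0b1010 is decimal 10):
if __name__ == "__main__":
    assert set_bit(0b1010, 2) == 0b1110    # 10 | 4 -> 14
    assert clear_bit(0b1111, 1) == 0b1101  # 15 & ~2 -> 13
    assert flip_bit(0b1010, 1) == 0b1000   # 10 ^ 2 -> 8
    assert is_bit_set(0b1010, 3) is True   # bit 3 of 10 is set
    assert get_bit(0b1010, 0) == 0         # lowest bit of 10 is clear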
| 691 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments :
    output_dir : str = field(
        metadata={"help": "The output directory where the model will be written."} , )
    encoder_model_name_or_path : str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        } , )
    decoder_model_name_or_path : str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        } , )
    encoder_config_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
    decoder_config_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def main( ):
    parser = HfArgumentParser((ModelArguments,) )
    (model_args,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer
    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=encoder_config, decoder_config=decoder_config, )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
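# Invoked from the command line, the script expects the ModelArguments fields above
# as flags. The checkpoint names below are illustrative only:
#
#   python <path-to-this-script> \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2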
| 691 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field ( input_text, convert_value=None, default=None, error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options ( input_text, options=[], convert_value=None, default_choice=0 ):
    menu = BulletMenu(input_text, options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment ( value ):
    value = int(value )
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _convert_distributed_mode ( value ):
    value = int(value )
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _convert_dynamo_backend ( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision ( value ):
    value = int(value )
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _convert_sagemaker_distributed_mode ( value ):
    value = int(value )
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _convert_yes_no_to_bool ( value ):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter ( argparse.RawDescriptionHelpFormatter ):
    def _format_usage( self, usage, actions, groups, prefix ):
        usage = super()._format_usage(usage, actions, groups, prefix )
        usage = usage.replace("<command> [<args>] ", "" )
        return usage
| 691 | 1 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess ( image, w, h ):
    if isinstance(image, torch.Tensor ):
        return image
    elif isinstance(image, PIL.Image.Image ):
        image = [image]
    if isinstance(image[0], PIL.Image.Image ):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
        image = np.concatenate(image, axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0, 3, 1, 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0], torch.Tensor ):
        image = torch.cat(image, dim=0 )
    return image
def slerp ( t, va, vb, DOT_THRESHOLD=0.9995 ):
    # Track torch inputs so the result can be converted back; initialized here so the
    # final check is well-defined for plain numpy inputs.
    inputs_are_torch = False
    if not isinstance(va, np.ndarray ):
        inputs_are_torch = True
        input_device = va.device
        va = va.cpu().numpy()
        vb = vb.cpu().numpy()
    dot = np.sum(va * vb / (np.linalg.norm(va ) * np.linalg.norm(vb )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        # Nearly parallel vectors: fall back to plain linear interpolation.
        va = (1 - t) * va + t * vb
    else:
        theta_a = np.arccos(dot )
        sin_theta_a = np.sin(theta_a )
        theta_t = theta_a * t
        sin_theta_t = np.sin(theta_t )
        sa = np.sin(theta_a - theta_t ) / sin_theta_a
        sb = sin_theta_t / sin_theta_a
        va = sa * va + sb * vb
    if inputs_are_torch:
        va = torch.from_numpy(va ).to(input_device )
    return va
def spherical_dist_loss ( x, y ):
    x = F.normalize(x, dim=-1 )
    y = F.normalize(y, dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def set_requires_grad ( model, value ):
    for param in model.parameters():
        param.requires_grad = value
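# Two quick numerical checks of the helpers above (illustrative additions, not part
# of the original pipeline file):
import math

# slerp halfway between orthogonal unit vectors weights both by sin(pi/4)/sin(pi/2) = sqrt(0.5).
_mid = slerp(0.5, np.array([1.0, 0.0] ), np.array([0.0, 1.0] ) )
assert np.allclose(_mid, np.sqrt(0.5 ) * np.array([1.0, 1.0] ) )
# spherical_dist_loss is 0 for identical directions and 2 * arcsin(sqrt(2)/2)**2 = pi**2 / 8
# for orthogonal unit vectors.
_x = torch.tensor([[1.0, 0.0]] )
_y = torch.tensor([[0.0, 1.0]] )
assert torch.allclose(spherical_dist_loss(_x, _x ), torch.tensor([0.0] ) )
assert torch.allclose(spherical_dist_loss(_x, _y ), torch.tensor([math.pi ** 2 / 8] ), atol=1e-4 )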
class A ( a ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , ) -> List[Any]:
super().__init__()
self.register_modules(
vae=snake_case_ , text_encoder=snake_case_ , clip_model=snake_case_ , tokenizer=snake_case_ , unet=snake_case_ , scheduler=snake_case_ , feature_extractor=snake_case_ , coca_model=snake_case_ , coca_tokenizer=snake_case_ , coca_transform=snake_case_ , )
_a = (
feature_extractor.size
if isinstance(feature_extractor.size , snake_case_ )
else feature_extractor.size["shortest_edge"]
)
_a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , snake_case_ )
set_requires_grad(self.clip_model , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.enable_attention_slicing(snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
set_requires_grad(self.vae , snake_case_ )
def __lowerCAmelCase ( self ) -> Tuple:
set_requires_grad(self.vae , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
set_requires_grad(self.unet , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[Any]:
set_requires_grad(self.unet , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> int:
# get the original timestep using init_timestep
_a = min(int(num_inference_steps * strength ) , snake_case_ )
_a = max(num_inference_steps - init_timestep , 0 )
_a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ) -> Optional[Any]:
if not isinstance(snake_case_ , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(snake_case_ )}''' )
_a = image.to(device=snake_case_ , dtype=snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
_a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case_ )
]
_a = torch.cat(snake_case_ , dim=0 )
else:
_a = self.vae.encode(snake_case_ ).latent_dist.sample(snake_case_ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 0.18_215 * init_latents
_a = init_latents.repeat_interleave(snake_case_ , dim=0 )
_a = randn_tensor(init_latents.shape , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
# get latents
_a = self.scheduler.add_noise(snake_case_ , snake_case_ , snake_case_ )
_a = init_latents
return latents
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = self.coca_transform(snake_case_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = self.feature_extractor.preprocess(snake_case_ )
_a = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
_a = self.clip_model.get_image_features(snake_case_ )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case_ )
_a = image_embeddings_clip.repeat_interleave(snake_case_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> str:
_a = latents.detach().requires_grad_()
_a = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_a = self.unet(snake_case_ , snake_case_ , encoder_hidden_states=snake_case_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_a = self.scheduler.alphas_cumprod[timestep]
_a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_a = torch.sqrt(snake_case_ )
_a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , snake_case_ ):
_a = self.scheduler.sigmas[index]
_a = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 1 / 0.18_215 * sample
_a = self.vae.decode(snake_case_ ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = transforms.Resize(self.feature_extractor_size )(snake_case_ )
_a = self.normalize(snake_case_ ).to(latents.dtype )
_a = self.clip_model.get_image_features(snake_case_ )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case_ )
_a = spherical_dist_loss(snake_case_ , snake_case_ ).mean() * clip_guidance_scale
_a = -torch.autograd.grad(snake_case_ , snake_case_ )[0]
if isinstance(self.scheduler , snake_case_ ):
_a = latents.detach() + grads * (sigma**2)
_a = noise_pred_original
else:
_a = noise_pred_original - torch.sqrt(snake_case_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = 5_1_2 , snake_case_ = 5_1_2 , snake_case_ = 0.6 , snake_case_ = 5_0 , snake_case_ = 7.5 , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = 1_0_0 , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = 0.8 , snake_case_ = 0.1 , snake_case_ = 0.1 , ) -> List[str]:
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(snake_case_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(snake_case_ , torch.Generator ) and batch_size > 1:
_a = [generator] + [None] * (batch_size - 1)
_a = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
_a = [x[0] for x in coca_is_none if x[1]]
_a = ", ".join(snake_case_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(snake_case_ ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(snake_case_ )
if style_prompt is None:
if len(snake_case_ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(snake_case_ )
# get prompt text embeddings for content and style
_a = self.tokenizer(
snake_case_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="pt" , )
_a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_a = self.tokenizer(
snake_case_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="pt" , )
_a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_a = slerp(snake_case_ , snake_case_ , snake_case_ )
# duplicate text embeddings for each generation per prompt
_a = text_embeddings.repeat_interleave(snake_case_ , dim=0 )
# set timesteps
_a = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_a = {}
if accepts_offset:
_a = 1
self.scheduler.set_timesteps(snake_case_ , **snake_case_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_a , _a = self.get_timesteps(snake_case_ , snake_case_ , self.device )
_a = timesteps[:1].repeat(snake_case_ )
# Preprocess image
_a = preprocess(snake_case_ , snake_case_ , snake_case_ )
_a = self.prepare_latents(
snake_case_ , snake_case_ , snake_case_ , text_embeddings.dtype , self.device , snake_case_ )
_a = preprocess(snake_case_ , snake_case_ , snake_case_ )
_a = self.prepare_latents(
snake_case_ , snake_case_ , snake_case_ , text_embeddings.dtype , self.device , snake_case_ )
_a = slerp(snake_case_ , snake_case_ , snake_case_ )
if clip_guidance_scale > 0:
_a = self.get_clip_image_embeddings(snake_case_ , snake_case_ )
_a = self.get_clip_image_embeddings(snake_case_ , snake_case_ )
_a = slerp(
snake_case_ , snake_case_ , snake_case_ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = content_text_input.input_ids.shape[-1]
_a = self.tokenizer([""] , padding="max_length" , max_length=snake_case_ , return_tensors="pt" )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_a = uncond_embeddings.repeat_interleave(snake_case_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_a = torch.randn(snake_case_ , generator=snake_case_ , device="cpu" , dtype=snake_case_ ).to(
self.device )
else:
_a = torch.randn(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
# check if the scheduler accepts generator
_a = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_a = generator
with self.progress_bar(total=snake_case_ ):
for i, t in enumerate(snake_case_ ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_a = self.unet(snake_case_ , snake_case_ , encoder_hidden_states=snake_case_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_a , _a = self.cond_fn(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 1 / 0.18_215 * latents
_a = self.vae.decode(snake_case_ ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=snake_case_ , nsfw_content_detected=snake_case_ )
| 691 |
'''simple docstring'''
def simplify ( current_set: list[list] ):
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0, current_first_column[i] )
        resultant.insert(0, current_first_row )
        final_set = resultant
    return final_set
def solve_simultaneous ( equations: list[list] ):
    if len(equations ) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1" )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError("solve_simultaneous() requires n lists of length n+1" )
    for row in equations:
        if any(not isinstance(column, (int, float) ) for column in row ):
            raise ValueError("solve_simultaneous() requires lists of integers" )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation" )
        data_set.insert(0, full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item, 5 ) ) )
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
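# Hand check (illustrative): the 5x5 system above has 2s on the diagonal and 1s
# elsewhere, so with S = x1 + ... + x5 each equation reads S + x_i = b_i. Summing
# all five gives 6S = 30, hence S = 5 and x_i = b_i - 5, i.e. [-1.0, 0.0, 1.0, 2.0, 3.0];
# solve_simultaneous([[4, 2]]) reduces to the single equation 4x = 2, i.e. [0.5].
expected = [b - 5 for b in (4, 5, 6, 7, 8)]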
| 691 | 1 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card ( model_card_dir, src_lang, tgt_lang, model_name ):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
    pair = F'''{src_lang}-{tgt_lang}'''
    content = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True )
    path = os.path.join(model_card_dir, "README.md" )
    print(F'''Generating {path}''' )
    with open(path, "w", encoding="utf-8" ) as f:
        f.write(content )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum ( x ): # picklable for multiprocessing
    return x.sum()
def add_one ( i ): # picklable for multiprocessing
    return i + 1
@dataclass
class A :
    x : int
    y : str
class A ( a ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
class A :
__UpperCAmelCase : Optional[int] = """bar"""
_a = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc ( iterable_length, num_proc, expected_num_proc ):
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {F'''{i}''': i for i in range(iterable_length )}
        _a = map_nested(lambda x : x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
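# The parametrize table above encodes map_nested's dispatch rule: multiprocessing is
# used only when num_proc > 1 and the iterable has at least parallel_min_length (16)
# items, with num_proc capped at the data length. A restatement of that table
# (illustrative helper, not datasets' actual internals):
def _expected_num_proc_for ( iterable_length, num_proc ):
    if num_proc is None or num_proc <= 1 or iterable_length < 16:
        return 1
    return min(num_proc, iterable_length )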
class A ( a ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
_a = layers.Dense(2 )
def gen_random_output():
_a = tf.random.uniform((1, 3) )
return model(snake_case_ ).numpy()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
_a = torch.nn.Linear(3 , 2 )
_a = torch.rand(1 , 3 )
return model(snake_case_ ).detach().numpy()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a = gen_random_output()
with temp_seed(4_2 ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def test_nested_data_structure_data ( input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def test_flatten ( data, expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict ( ):
    obj = A(x=1, y="foobar" )
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(obj ) == expected_output
    obj = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(obj ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10, y="foo" )] )
def _split_text ( text: str ):
    return text.split()
def _2seconds_generator_of_2items_with_timing ( content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered ( ):
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
| 691 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class A ( a ):
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> int:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> List[str]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
_a = DistilBertModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , snake_case_ )
_a = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = DistilBertForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = DistilBertForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(
snake_case_ , attention_mask=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
_a = self.num_labels
_a = DistilBertForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = self.num_labels
_a = DistilBertForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
_a = self.num_choices
_a = DistilBertForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
snake_case_ , attention_mask=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.prepare_config_and_inputs()
((_a) , (_a) , (_a) , (_a) , (_a) , (_a)) = config_and_inputs
_a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A ( a , a , unittest.TestCase ):
__UpperCAmelCase : Optional[int] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase : List[Any] = (
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : int = True
__UpperCAmelCase : Optional[int] = True
def __lowerCAmelCase ( self ) -> Any:
_a = DistilBertModelTester(self )
_a = ConfigTester(self , config_class=snake_case_ , dim=3_7 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*snake_case_ )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = DistilBertModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@slow
@require_torch_gpu
def __lowerCAmelCase ( self ) -> List[str]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_a = True
_a = model_class(config=snake_case_ )
_a = self._prepare_for_class(snake_case_ , snake_case_ )
_a = torch.jit.trace(
snake_case_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case_ , os.path.join(snake_case_ , "traced_model.pt" ) )
_a = torch.jit.load(os.path.join(snake_case_ , "traced_model.pt" ) , map_location=snake_case_ )
loaded(inputs_dict["input_ids"].to(snake_case_ ) , inputs_dict["attention_mask"].to(snake_case_ ) )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Dict:
_a = DistilBertModel.from_pretrained("distilbert-base-uncased" )
_a = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_a = model(snake_case_ , attention_mask=snake_case_ )[0]
_a = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , snake_case_ )
_a = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) )
| 691 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a )
class A ( a ):
__UpperCAmelCase : Dict = """rag"""
__UpperCAmelCase : Dict = True
def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]:
super().__init__(
bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_a = kwargs.pop("question_encoder" )
_a = question_encoder_config.pop("model_type" )
_a = kwargs.pop("generator" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = reduce_loss
_a = label_smoothing
_a = exclude_bos_score
_a = do_marginalize
_a = title_sep
_a = doc_sep
_a = n_docs
_a = max_combined_length
_a = dataset
_a = dataset_split
_a = index_name
_a = retrieval_vector_size
_a = retrieval_batch_size
_a = passages_path
_a = index_path
_a = use_dummy_dataset
_a = output_retrieved
_a = do_deduplication
_a = use_cache
if self.forced_eos_token_id is None:
_a = getattr(self.generator , "forced_eos_token_id" , snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = copy.deepcopy(self.__dict__ )
_a = self.question_encoder.to_dict()
_a = self.generator.to_dict()
_a = self.__class__.model_type
return output
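# Hedged usage sketch (not part of the original file; assumes the de-anonymized
# `transformers` API `RagConfig.from_question_encoder_generator_configs`, which the
# classmethod above corresponds to; the model types are illustrative only):
#
#     from transformers import AutoConfig, RagConfig
#     question_encoder = AutoConfig.for_model("dpr")
#     generator = AutoConfig.for_model("bart")
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         question_encoder, generator, n_docs=5, max_combined_length=300
#     )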
| 691 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
class A ( a ):
__UpperCAmelCase : str = """encoder-decoder"""
__UpperCAmelCase : List[str] = True
def __init__( self , **snake_case_ ) -> Optional[Any]:
super().__init__(**snake_case_ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_a = kwargs.pop("encoder" )
_a = encoder_config.pop("model_type" )
_a = kwargs.pop("decoder" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = True
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
_a = True
_a = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Tuple:
_a = copy.deepcopy(self.__dict__ )
_a = self.encoder.to_dict()
_a = self.decoder.to_dict()
_a = self.__class__.model_type
return output
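# Hedged usage sketch (not part of the original file; assumes the de-anonymized
# `transformers` API `EncoderDecoderConfig.from_encoder_decoder_configs`, which the
# classmethod above corresponds to; the model types are illustrative only):
#
#     from transformers import AutoConfig, EncoderDecoderConfig
#     encoder = AutoConfig.for_model("bert")
#     decoder = AutoConfig.for_model("gpt2")
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention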
| 691 |
'''simple docstring'''
class Graph :
    def __init__( self ) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ) -> None:
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ) -> None:
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ) -> None:
        # make all edge weights distinct so the minimum spanning tree is unique
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )  # drop the mirrored duplicate
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda edge : edge[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("\n" )
    def get_edges( self ) -> list:
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ) -> "Graph":
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
class UnionFind :
    def __init__( self ) -> None:
        self.parent = {}
        self.rank = {}
    def __len__( self ) -> int:
        return len(self.parent )
    def make_set( self , item ):
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find( self , item ):
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]
    def union( self , item_a , item_b ):
        roota = self.find(item_a )
        rootb = self.find(item_b )
        if roota == rootb:
            return roota
        if self.rank[roota] > self.rank[rootb]:
            self.parent[rootb] = roota
            return roota
        if self.rank[roota] < self.rank[rootb]:
            self.parent[roota] = rootb
            return rootb
        if self.rank[roota] == self.rank[rootb]:
            self.rank[roota] += 1
            self.parent[rootb] = roota
            return roota
        return None
    @staticmethod
    def mst( graph ) -> "Graph":
        # Boruvka's algorithm: repeatedly add each component's cheapest outgoing edge.
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head , tail , weight = edge
                edges.remove((tail, head, weight) )  # drop the mirrored duplicate
            for edge in edges:
                head , tail , weight = edge
                seta = union_find.find(head )
                setb = union_find.find(tail )
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        cheap_edge[seta] = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        cheap_edge[setb] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
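# Hedged demo (not part of the original file): with the class and method names
# restored above, build a 4-vertex cycle with distinct weights; Boruvka keeps the
# three cheapest edges and drops the heaviest one.
if __name__ == "__main__":
    _g = Graph.build(
        vertices=[1, 2, 3, 4] ,
        edges=[(1, 2, 1), (2, 3, 2), (3, 4, 3), (4, 1, 4)] ,
    )
    print(UnionFind.mst(_g ) )  # edges (1, 2), (2, 3) and (3, 4), printed both ways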
| 691 | 1 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A ( unittest.TestCase ):
@parameterized.expand([(None,), ("foo.json",)] )
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
_a = GenerationConfig(
do_sample=snake_case_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case_ , config_name=snake_case_ )
_a = GenerationConfig.from_pretrained(snake_case_ , config_name=snake_case_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , snake_case_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = AutoConfig.from_pretrained("gpt2" )
_a = GenerationConfig.from_model_config(snake_case_ )
_a = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(snake_case_ , snake_case_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def __lowerCAmelCase ( self ) -> Dict:
_a = GenerationConfig()
_a = {
"max_new_tokens": 1_0_2_4,
"foo": "bar",
}
_a = copy.deepcopy(snake_case_ )
_a = generation_config.update(**snake_case_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(snake_case_ , snake_case_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(snake_case_ , {"foo": "bar"} )
def __lowerCAmelCase ( self ) -> List[str]:
_a = GenerationConfig()
_a = "bar"
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(snake_case_ )
_a = GenerationConfig.from_pretrained(snake_case_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
_a = GenerationConfig.from_model_config(snake_case_ )
assert not hasattr(snake_case_ , "foo" ) # no new kwargs should be initialized if from config
def __lowerCAmelCase ( self ) -> int:
_a = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , snake_case_ )
self.assertEqual(default_config.num_beams , 1 )
_a = GenerationConfig(
do_sample=snake_case_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , snake_case_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case_ )
_a = GenerationConfig.from_pretrained(snake_case_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , snake_case_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A ( unittest.TestCase ):
@classmethod
def __lowerCAmelCase ( cls ) -> Tuple:
_a = TOKEN
HfFolder.save_token(snake_case_ )
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
def __lowerCAmelCase ( self ) -> List[str]:
_a = GenerationConfig(
do_sample=snake_case_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token )
_a = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case_ , repo_id="test-generation-config" , push_to_hub=snake_case_ , use_auth_token=self._token )
_a = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
def __lowerCAmelCase ( self ) -> List[str]:
_a = GenerationConfig(
do_sample=snake_case_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
_a = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case_ , repo_id="valid_org/test-generation-config-org" , push_to_hub=snake_case_ , use_auth_token=self._token )
_a = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
| 691 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case : Tuple = "\\n Text data.\n Second line of data."
__snake_case : int = "file"
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_a = bytes(lowerCamelCase__, "utf-8" )
with zstd.open(lowerCamelCase__, "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture
def _lowercase ( lowerCamelCase__ : int ):
with open(os.path.join(tmpfs.local_root_dir, lowerCamelCase__ ), "w" ) as f:
f.write(lowerCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
_a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_a = input_paths[compression_format]
_a = tmp_path / "cache"
_a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ )
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
with open(lowerCamelCase__ ) as f:
_a = f.read()
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = "custom_cache"
_a = "custom_extracted_dir"
_a = tmp_path / "custom_extracted_path"
if default_extracted:
_a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) )
_a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a = xz_file
_a = (
DownloadConfig(extract_compressed_file=lowerCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ )
)
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
# absolute path
_a = str(Path(lowerCamelCase__ ).resolve() )
assert cached_path(lowerCamelCase__ ) == text_file
# relative path
_a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCamelCase__ ) == text_file
def _lowercase ( lowerCamelCase__ : Dict ):
# absolute path
_a = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
# relative path
_a = "./__missing_file__.txt"
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
| 691 | 1 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__snake_case : Tuple = "sshleifer/mar_enro_6_3_student"
class A ( a ):
def __lowerCAmelCase ( self ) -> str:
super().setUp()
_a = cached_path(
"https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=snake_case_ , )
_a = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def __lowerCAmelCase ( self ) -> Dict:
MarianMTModel.from_pretrained(snake_case_ )
@slow
@require_torch_gpu
def __lowerCAmelCase ( self ) -> List[str]:
_a = {
"$MAX_LEN": 6_4,
"$BS": 6_4,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
_a = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
_a = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
for k, v in env_vars_to_replace.items():
_a = bash_script.replace(snake_case_ , str(snake_case_ ) )
_a = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_a = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_a = ["finetune.py"] + bash_script.split() + args
with patch.object(snake_case_ , "argv" , snake_case_ ):
_a = argparse.ArgumentParser()
_a = pl.Trainer.add_argparse_args(snake_case_ )
_a = SummarizationModule.add_model_specific_args(snake_case_ , os.getcwd() )
_a = parser.parse_args()
_a = main(snake_case_ )
# Check metrics
_a = load_json(model.metrics_save_path )
_a = metrics["val"][0]
_a = metrics["val"][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , snake_case_ )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
        # guards against the model hanging on generate (e.g. because a bad config was saved)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 1_7 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_a = os.listdir(snake_case_ )
_a = [x for x in contents if x.endswith(".ckpt" )][0]
_a = os.path.join(args.output_dir , snake_case_ )
_a = torch.load(snake_case_ , map_location="cpu" )
_a = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_a = {os.path.basename(snake_case_ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class A ( a ):
@timeout_decorator.timeout(6_0_0 )
@slow
@require_torch_gpu
def __lowerCAmelCase ( self ) -> int:
_a = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_a = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 1_2_8,
"$BS": 1_6,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
_a = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
)
_a = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
_a = bash_script.replace("--fp16 " , " " )
for k, v in env_vars_to_replace.items():
_a = bash_script.replace(snake_case_ , str(snake_case_ ) )
_a = self.get_auto_remove_tmp_dir()
_a = bash_script.replace("--fp16" , "" )
_a = 6
_a = (
["distillation.py"]
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
"--gpus=1",
"--learning_rate=1e-3",
F'''--num_train_epochs={epochs}''',
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(snake_case_ , "argv" , snake_case_ ):
_a = argparse.ArgumentParser()
_a = pl.Trainer.add_argparse_args(snake_case_ )
_a = SummarizationDistiller.add_model_specific_args(snake_case_ , os.getcwd() )
_a = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_a = distill_main(snake_case_ )
# Check metrics
_a = load_json(model.metrics_save_path )
_a = metrics["val"][0]
_a = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , snake_case_ )
# check lightning ckpt can be loaded and has a reasonable statedict
_a = os.listdir(snake_case_ )
_a = [x for x in contents if x.endswith(".ckpt" )][0]
_a = os.path.join(args.output_dir , snake_case_ )
_a = torch.load(snake_case_ , map_location="cpu" )
_a = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_a = {os.path.basename(snake_case_ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
| 691 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__snake_case : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = {}
state_dict.pop("pixel_mean", lowerCamelCase__ )
state_dict.pop("pixel_std", lowerCamelCase__ )
_a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a = key.replace(lowerCamelCase__, lowerCamelCase__ )
if re.match(lowerCamelCase__, lowerCamelCase__ ):
_a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) )
if layer_nb == 0:
_a = key.replace("layers.0", "proj_in" )
elif layer_nb == 1:
_a = key.replace("layers.1", "layers.0" )
elif layer_nb == 2:
_a = key.replace("layers.2", "proj_out" )
_a = value
_a = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
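# Hedged illustration (not part of the original script): how the regex above drives
# the hypernetwork-MLP renumbering (layers.0 -> proj_in, layers.1 -> layers.0,
# layers.2 -> proj_out).
def _demo_layer_renumbering():
    _key = "mask_decoder.output_hypernetworks_mlps.3.layers.2.weight"
    _match = re.match(R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" , _key )
    assert _match is not None and int(_match.group(2 ) ) == 2
    return _key.replace("layers.2" , "proj_out" )  # -> "...mlps.3.proj_out.weight"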
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ):
_a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_a = SamConfig()
elif "sam_vit_l" in model_name:
_a = SamVisionConfig(
hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
elif "sam_vit_h" in model_name:
_a = SamVisionConfig(
hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
_a = torch.load(lowerCamelCase__, map_location="cpu" )
_a = replace_keys(lowerCamelCase__ )
_a = SamImageProcessor()
_a = SamProcessor(image_processor=lowerCamelCase__ )
_a = SamModel(lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
_a = hf_model.to("cuda" )
_a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" )
_a = [[[400, 650]]]
_a = [[1]]
_a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
_a = ((75, 275, 1_725, 850),)
_a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
_a = [[[400, 650], [800, 650]]]
_a = [[1, 1]]
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
__snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__snake_case : str = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 691 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__snake_case : Optional[int] = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__snake_case : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowercase ( num_diffusion_timesteps : int, max_beta : float = 0.9_99, alpha_transform_type : str = "cosine", ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ), max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
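if __name__ == "__main__":
    # Hedged demo (not part of the original file): a 3-step cosine schedule; the
    # final beta is clipped at max_beta. Printed values are approximate.
    print(_lowercase(3 ) )  # ~ tensor([0.2575, 0.6680, 0.9990])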
class A ( a , a ):
__UpperCAmelCase : int = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Optional[int] = 2
@register_to_config
def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]:
if trained_betas is not None:
_a = torch.tensor(snake_case_ , dtype=torch.floataa )
elif beta_schedule == "linear":
_a = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a = betas_for_alpha_bar(snake_case_ )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_a = 1.0 - self.betas
_a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case_ , snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Dict:
if schedule_timesteps is None:
_a = self.timesteps
_a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a = 1 if len(snake_case_ ) > 1 else 0
else:
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
_a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
_a = self.index_for_timestep(snake_case_ )
if self.state_in_first_order:
_a = self.sigmas[step_index]
else:
_a = self.sigmas_interpol[step_index]
_a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Union[str, Any]:
_a = num_inference_steps
_a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a = torch.from_numpy(np.log(snake_case_ ) ).to(snake_case_ )
_a = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ )
_a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a = torch.from_numpy(snake_case_ ).to(device=snake_case_ )
# interpolate sigmas
_a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(snake_case_ ).startswith("mps" ):
# mps does not support float64
_a = torch.from_numpy(snake_case_ ).to(snake_case_ , dtype=torch.floataa )
else:
_a = torch.from_numpy(snake_case_ ).to(snake_case_ )
# interpolate timesteps
_a = self.sigma_to_t(snake_case_ ).to(snake_case_ , dtype=timesteps.dtype )
_a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_a = torch.cat([timesteps[:1], interleaved_timesteps] )
_a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a = defaultdict(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
# get log sigma
_a = sigma.log()
# get distribution
_a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_a = low_idx + 1
_a = self.log_sigmas[low_idx]
_a = self.log_sigmas[high_idx]
# interpolate sigmas
_a = (low - log_sigma) / (low - high)
_a = w.clamp(0 , 1 )
# transform interpolation to time range
_a = (1 - w) * low_idx + w * high_idx
_a = t.view(sigma.shape )
return t
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return self.sample is None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]:
_a = self.index_for_timestep(snake_case_ )
# advance index counter by 1
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a = self.sigmas[step_index]
_a = self.sigmas_interpol[step_index + 1]
_a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_a = self.sigmas[step_index - 1]
_a = self.sigmas_interpol[step_index]
_a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a = 0
_a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a = sigma_interpol - sigma_hat
# store for 2nd order step
_a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_a = sigma_next - sigma_hat
_a = self.sample
_a = None
_a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ):
# mps does not support float64
_a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_a = self.timesteps.to(original_samples.device )
_a = timesteps.to(original_samples.device )
_a = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps]
_a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a = sigma.unsqueeze(-1 )
_a = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> str:
return self.config.num_train_timesteps
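# Hedged usage sketch (not part of the original file). The internal references
# (`set_timesteps`, `sigma_to_t`, `state_in_first_order`) suggest this is a
# KDPM2-style discrete scheduler; assuming the usual de-anonymized diffusers API:
#
#     scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)  # class name assumed
#     scheduler.set_timesteps(num_inference_steps=25)
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet call
#         sample = scheduler.step(noise_pred, t, sample).prev_sample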
| 691 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__snake_case : Optional[int] = logging.get_logger("transformers.models.speecht5")
__snake_case : int = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__snake_case : str = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__snake_case : int = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__snake_case : Dict = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__snake_case : Optional[Any] = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__snake_case : str = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__snake_case : List[str] = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__snake_case : Dict = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__snake_case : Optional[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__snake_case : List[Any] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case : Optional[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case : str = []
__snake_case : Tuple = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__snake_case : str = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__snake_case : Any = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__snake_case : List[Any] = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : Optional[int], lowerCamelCase__ : int, lowerCamelCase__ : Dict, lowerCamelCase__ : int ):
for attribute in key.split("." ):
_a = getattr(lowerCamelCase__, lowerCamelCase__ )
if weight_type is not None:
_a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
else:
_a = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Tuple ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
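# Hedged illustration (not part of the original script) of the three pattern forms
# the function above handles:
#   "encoder.proj"                   ignores "encoder.proj.weight"                           (plain substring)
#   "speech_decoder_prenet.*"        ignores "speech_decoder_prenet.spkembs_layer.0.weight"  (trailing wildcard)
#   "encoder.layers.*.norm_k.weight" ignores "encoder.layers.3.norm_k.weight"                (infix wildcard)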
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any] ):
_a = []
if task == "s2t":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2T
_a = IGNORE_KEYS_S2T
elif task == "t2s":
_a = None
_a = MAPPING_T2S
_a = IGNORE_KEYS_T2S
elif task == "s2s":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2S
_a = IGNORE_KEYS_S2S
else:
raise ValueError(F'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(lowerCamelCase__, lowerCamelCase__ ):
logger.info(F'''{name} was ignored''' )
continue
_a = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, hf_model.config.feat_extract_norm == "group", )
_a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
_a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_a = True
if "*" in mapped_key:
_a = name.split(lowerCamelCase__ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCamelCase__ )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
elif "running_mean" in name:
_a = "running_mean"
elif "running_var" in name:
_a = "running_var"
elif "num_batches_tracked" in name:
_a = "num_batches_tracked"
else:
_a = None
set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Optional[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : int, lowerCamelCase__ : int ):
_a = full_name.split("conv_layers." )[-1]
_a = name.split("." )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
_a = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
_a = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
_a = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
_a = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Any, lowerCamelCase__ : Tuple, lowerCamelCase__ : Optional[int]=None, lowerCamelCase__ : List[Any]=None, lowerCamelCase__ : int=None, ):
if config_path is not None:
_a = SpeechTaConfig.from_pretrained(lowerCamelCase__ )
else:
_a = SpeechTaConfig()
if task == "s2t":
_a = config.max_text_positions
_a = SpeechTaForSpeechToText(lowerCamelCase__ )
elif task == "t2s":
_a = 1_876
_a = 600
_a = config.max_speech_positions
_a = SpeechTaForTextToSpeech(lowerCamelCase__ )
elif task == "s2s":
_a = 1_876
_a = config.max_speech_positions
_a = SpeechTaForSpeechToSpeech(lowerCamelCase__ )
else:
raise ValueError(F'''Unknown task name: {task}''' )
if vocab_path:
_a = SpeechTaTokenizer(lowerCamelCase__, model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_a = AddedToken("<mask>", lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ )
_a = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
_a = SpeechTaFeatureExtractor()
_a = SpeechTaProcessor(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
_a = torch.load(lowerCamelCase__ )
recursively_load_weights(fairseq_checkpoint["model"], lowerCamelCase__, lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__snake_case : Tuple = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 691 |
'''simple docstring'''
def valid_coloring ( neighbours : list[int], colored_vertices : list[int], color : int ):
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color ( graph : list[list[int]], max_colors : int, colored_vertices : list[int], index : int ):
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index], colored_vertices, i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def _lowercase ( graph : list[list[int]], max_colors : int ):
    colored_vertices = [-1] * len(graph )
    if util_color(graph, max_colors, colored_vertices, 0 ):
        return colored_vertices
    return []
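if __name__ == "__main__":
    # Hedged demo (not part of the original file): a triangle plus a pendant vertex
    # is 3-colorable but not 2-colorable.
    _graph = [
        [0, 1, 1, 0],
        [1, 0, 1, 0],
        [1, 1, 0, 1],
        [0, 0, 1, 0],
    ]
    print(_lowercase(_graph , 3 ) )  # -> [0, 1, 2, 0]
    print(_lowercase(_graph , 2 ) )  # -> []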
| 691 | 1 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
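

# Illustrative usage (not part of the original module): solve
#   x + 2y = 3
#   2x + y = 3
if __name__ == "__main__":
    print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # (1.0, 1.0)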
| 691 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    # Kept as a placeholder, as in the original module; the Prim variants are
    # exercised in the demo after the __main__ block below.
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
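

# Illustrative run (not part of the original module): a small 4-vertex graph
# exercising connect(), prim() and prim_heap() defined above.
if __name__ == "__main__":
    demo = [Vertex(i) for i in range(1, 5)]
    connect(demo, 1, 2, 1)
    connect(demo, 2, 3, 2)
    connect(demo, 3, 4, 1)
    connect(demo, 1, 4, 4)
    print(prim(demo, demo[0]))             # [(2, 1), (3, 2), (4, 3)]
    print(list(prim_heap(demo, demo[0])))  # same MST as (child, parent) pairs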
| 691 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
_a = None
_a = 2_0
_a = self._get_uniform_logits(batch_size=2 , length=snake_case_ )
# tweak scores to not be uniform anymore
_a = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_a = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_a = jax.nn.softmax(snake_case_ , axis=-1 )
_a = FlaxTemperatureLogitsWarper(temperature=0.5 )
_a = FlaxTemperatureLogitsWarper(temperature=1.3 )
_a = jax.nn.softmax(temp_dist_warper_sharper(snake_case_ , scores.copy() , cur_len=snake_case_ ) , axis=-1 )
_a = jax.nn.softmax(temp_dist_warper_smoother(snake_case_ , scores.copy() , cur_len=snake_case_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper(self):
_a = None
_a = 1_0
_a = 2
# create ramp distribution
_a = np.broadcast_to(np.arange(snake_case_ )[None, :] , (batch_size, vocab_size) ).copy()
_a = ramp_logits[1:, : vocab_size // 2] + vocab_size
_a = FlaxTopKLogitsWarper(3 )
_a = top_k_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_a = 5
_a = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_a = np.broadcast_to(np.arange(snake_case_ )[None, :] , (batch_size, length) ).copy()
_a = top_k_warp_safety_check(snake_case_ , snake_case_ , cur_len=snake_case_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper(self):
_a = None
_a = 1_0
_a = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_a = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_a = FlaxTopPLogitsWarper(0.8 )
_a = np.exp(top_p_warp(snake_case_ , snake_case_ , cur_len=snake_case_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_a = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_a = np.broadcast_to(np.arange(snake_case_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_a = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_a = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_a = top_p_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor(self):
_a = 2_0
_a = 4
_a = 0
_a = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=snake_case_ )
# check that min length is applied at length 5
_a = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
_a = 5
_a = self._get_uniform_logits(snake_case_ , snake_case_ )
_a = min_dist_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
_a = self._get_uniform_logits(snake_case_ , snake_case_ )
_a = 1_5
_a = min_dist_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertFalse(jnp.isinf(snake_case_ ).any() )
    def test_forced_bos_token_logits_processor(self):
_a = 2_0
_a = 4
_a = 0
_a = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case_ )
# check that all scores are -inf except the bos_token_id score
_a = ids_tensor((batch_size, 1) , vocab_size=2_0 )
_a = 1
_a = self._get_uniform_logits(snake_case_ , snake_case_ )
_a = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_a = 3
_a = self._get_uniform_logits(snake_case_ , snake_case_ )
_a = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertFalse(jnp.isinf(snake_case_ ).any() )
    def test_forced_eos_token_logits_processor(self):
_a = 2_0
_a = 4
_a = 0
_a = 5
_a = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case_ , eos_token_id=snake_case_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
_a = ids_tensor((batch_size, 4) , vocab_size=2_0 )
_a = 4
_a = self._get_uniform_logits(snake_case_ , snake_case_ )
_a = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_a = 3
_a = self._get_uniform_logits(snake_case_ , snake_case_ )
_a = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertFalse(jnp.isinf(snake_case_ ).any() )
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, sequence_length)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1E-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, sequence_length)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1E-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
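

# Standalone sketch (illustrative, not one of the tests above; requires
# jax/flax to be installed): every warper is a plain callable with signature
# (input_ids, scores, cur_len).
if __name__ == "__main__":
    demo_scores = jnp.log(jnp.array([[0.1, 0.4, 0.2, 0.3]]))
    demo_warped = FlaxTopKLogitsWarper(top_k=2)(None, demo_scores, cur_len=1)
    print(demo_warped)  # logits outside the top-2 are filtered to -inf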
| 691 |
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())) -> None:  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self) -> int:
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
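

# Determinism sketch (illustrative, not in the original file): the same
# parameters and seed always reproduce the same stream.
if __name__ == "__main__":
    _g1 = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31, seed=42)
    _g2 = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31, seed=42)
    assert [_g1.next_number() for _ in range(5)] == [_g2.next_number() for _ in range(5)]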
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 691 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 3_2, 1_2_8)
        self.tmpdirname = tempfile.mkdtemp()
# fmt: off
_a = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case_ ) + "\n" )
_a = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 3_2, "width": 1_2_8},
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(snake_case_ , snake_case_ )
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self):
        image_input = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1 ) )
        return image_input
    def test_save_load_pretrained_default(self):
_a = self.get_tokenizer()
_a = self.get_image_processor()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_a = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
    def test_save_load_pretrained_additional_features(self):
_a = self.get_tokenizer()
_a = self.get_image_processor()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_a = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
_a = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
    def test_image_processor(self):
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = self.prepare_image_inputs()
_a = image_processor(snake_case_ , return_tensors="np" )
_a = processor(images=snake_case_ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self):
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = "test"
_a = processor(text=snake_case_ )
_a = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self):
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = "test"
_a = self.prepare_image_inputs()
_a = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
    def test_char_decode(self):
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_a = processor.char_decode(snake_case_ )
_a = tokenizer.batch_decode(snake_case_ )
_a = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(snake_case_ , snake_case_ )
    def test_model_input_names(self):
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = None
_a = self.prepare_image_inputs()
_a = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
    def test_processor_batch_decode(self):
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = torch.randn(1 , 2_7 , 3_8 )
_a = torch.randn(1 , 2_7 , 5_0_2_5_7 )
_a = torch.randn(1 , 2_7 , 3_0_5_2_2 )
_a = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 691 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore(name, ignore_keys):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
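

# Quick self-check (illustrative, not in the original script) of the three
# matching rules handled by should_ignore() above.
def _demo_should_ignore() -> None:
    assert should_ignore("encoder.layers.0.conv.bias", ["encoder.*.bias"])  # ".*." infix rule
    assert should_ignore("decoder.model.0.conv.conv", ["decoder.*"])  # trailing ".*" prefix rule
    assert not should_ignore("quantizer.vq.layers.0.embed", ["decoder.*"])  # no rule matches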
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'''Unsupported model: {model_name}''' )

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'''{name} was ignored''' )
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'''Unknown model name: {model_name}''' )

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 691 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
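
    # Design note (illustrative): the TYPE_CHECKING branch above gives static
    # type checkers real imports, while at runtime _LazyModule only records the
    # names listed in _import_structure; a submodule such as modeling_roformer
    # is imported the first time one of its symbols (e.g. RoFormerModel) is
    # actually accessed.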
| 691 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A(metaclass=DummyObject):
    _backends = ["torch", "scipy"]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
| 691 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=1_3,
        num_channels=3,
        image_size=3_2,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1_0_2_4,
        output_stride=3_2,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=1_0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
    def test_attention_outputs(self):
pass
    def test_forward_signature(self):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(snake_case_ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
_a = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_a = outputs.hidden_states
_a = 2_6
self.assertEqual(len(snake_case_ ) , snake_case_ )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_1))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1_739, -1.1_233, 3.1_205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 691 |
'''simple docstring'''
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
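

# Illustrative consumption (not part of the original table): every value is a
# ready-made pip requirement string, so a caller can look one up and verify it.
if __name__ == "__main__":
    from transformers.utils.versions import require_version

    print(deps["numpy"])  # "numpy>=1.17"
    require_version(deps["numpy"])  # raises if the installed numpy violates the pin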
| 691 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__snake_case : Tuple = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 691 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 1_2, 1_0, 1_1])
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 691 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 691 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
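    # Compute accuracy from (logits, labels): take the argmax over classes before scoring.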
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
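        # Tokenize the source code and map the string complexity label to its class id.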
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
_a = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 691 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class A ( a ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
_a = dataset
_a = process
_a = params
def __len__( self ) -> Tuple:
return len(self.dataset )
def __getitem__( self , snake_case_ ) -> Tuple:
_a = self.dataset[i]
_a = self.process(snake_case_ , **self.params )
return processed
class A ( a ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ) -> Dict:
_a = loader
_a = infer
_a = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_a = None
_a = loader_batch_size
# Internal bookkeeping
_a = None
_a = None
def __len__( self ) -> Union[str, Any]:
return len(self.loader )
def __iter__( self ) -> Union[str, Any]:
_a = iter(self.loader )
return self
def __lowerCAmelCase ( self ) -> List[Any]:
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_a = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_a = {}
for k, element in self._loader_batch_data.items():
if isinstance(snake_case_ , snake_case_ ):
# Convert ModelOutput to tuple first
_a = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_a = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_a = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ):
# Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_a = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_a = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_a = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_a = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_a = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_a = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_a = self._loader_batch_data.__class__(snake_case_ )
self._loader_batch_index += 1
return result
def __lowerCAmelCase ( self ) -> str:
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_a = next(self.iterator )
_a = self.infer(snake_case_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(snake_case_ , torch.Tensor ):
_a = processed
else:
_a = list(processed.keys() )[0]
_a = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_a = len(snake_case_ )
else:
_a = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be the last batch, so we can't unroll as many
# elements.
_a = observed_batch_size
# Setting internal index to unwrap the batch
_a = processed
_a = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class A ( a ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ) -> Any:
super().__init__(snake_case_ , snake_case_ , snake_case_ )
def __iter__( self ) -> Dict:
_a = iter(self.loader )
_a = None
return self
def __lowerCAmelCase ( self ) -> int:
if self.subiterator is None:
_a = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_a = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_a = self.infer(next(self.iterator ) , **self.params )
_a = next(self.subiterator )
return processed
class A ( a ):
def __iter__( self ) -> Any:
_a = iter(self.loader )
return self
def __lowerCAmelCase ( self ) -> Tuple:
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator`, so we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# hits an `is_last` and then just passes it on to the caller.
_a = False
_a = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_a = self.loader_batch_item()
_a = item.pop("is_last" )
accumulator.append(snake_case_ )
if is_last:
return accumulator
while not is_last:
_a = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(snake_case_ , torch.Tensor ):
_a = processed
else:
_a = list(processed.keys() )[0]
_a = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_a = len(snake_case_ )
else:
_a = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be the last batch, so we can't unroll as many
# elements.
_a = observed_batch_size
_a = processed
_a = 0
while self._loader_batch_index < self.loader_batch_size:
_a = self.loader_batch_item()
_a = item.pop("is_last" )
accumulator.append(snake_case_ )
if is_last:
return accumulator
else:
_a = processed
_a = item.pop("is_last" )
accumulator.append(snake_case_ )
return accumulator
class A ( a ):
def __init__( self , snake_case_ , snake_case_ ) -> Tuple:
_a = dataset
_a = key
def __len__( self ) -> str:
return len(self.dataset )
def __getitem__( self , snake_case_ ) -> Dict:
return self.dataset[i][self.key]
class A ( a ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = dataset
_a = keya
_a = keya
def __len__( self ) -> List[Any]:
return len(self.dataset )
def __getitem__( self , snake_case_ ) -> List[Any]:
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 691 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
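    # Sample sentences used in the model card's usage example, keyed by language.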
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[str], lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int] ):
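    # Follow the dotted attribute path in `key` and copy `value` into the matching HF parameter.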
for attribute in key.split("." ):
_a = getattr(lowerCamelCase__, lowerCamelCase__ )
if weight_type is not None:
_a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape
else:
_a = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
else:
_a = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict, lowerCamelCase__ : Tuple ):
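    # Map every entry of the fairseq state dict onto the corresponding HF Hubert weight.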
_a = []
_a = fairseq_model.state_dict()
_a = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, hf_model.config.feat_extract_norm == "group", )
_a = True
else:
for key, mapped_key in MAPPING.items():
_a = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
_a = True
if "*" in mapped_key:
_a = name.split(lowerCamelCase__ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCamelCase__ )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "weight" in name:
_a = "weight"
elif "bias" in name:
_a = "bias"
else:
_a = None
set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : int, lowerCamelCase__ : List[Any] ):
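    # Copy a single feature-extractor weight (conv or layer norm) from the fairseq checkpoint.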
_a = full_name.split("conv_layers." )[-1]
_a = name.split("." )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_a = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : str=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Union[str, Any]=True ):
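    # Build the HF config/processor/model, load the fairseq weights, and save the result.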
if config_path is not None:
_a = HubertConfig.from_pretrained(lowerCamelCase__ )
else:
_a = HubertConfig()
if is_finetuned:
if dict_path:
_a = Dictionary.load(lowerCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a = target_dict.pad_index
_a = target_dict.bos_index
_a = target_dict.eos_index
_a = len(target_dict.symbols )
_a = os.path.join(lowerCamelCase__, "vocab.json" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__, exist_ok=lowerCamelCase__ )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices, lowerCamelCase__ )
_a = WavaVecaCTCTokenizer(
lowerCamelCase__, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=lowerCamelCase__, )
_a = True if config.feat_extract_norm == "layer" else False
_a = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=lowerCamelCase__, return_attention_mask=lowerCamelCase__, )
_a = WavaVecaProcessor(feature_extractor=lowerCamelCase__, tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
_a = HubertForCTC(lowerCamelCase__ )
else:
_a = HubertModel(lowerCamelCase__ )
if is_finetuned:
_a , _a , _a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_a , _a , _a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a = model[0].eval()
recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
hf_wavavec.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__snake_case : List[Any] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 691 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int, lowerCamelCase__ : int ):
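    # Roots of a*x^2 + b*x + c = 0 via the quadratic formula; complex roots stay complex.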
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
_a = b * b - 4 * a * c
_a = (-b + sqrt(lowerCamelCase__ )) / (2 * a)
_a = (-b - sqrt(lowerCamelCase__ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _lowercase ( ):
_a , _a = quadratic_roots(a=5, b=6, c=1 )
print(F'''The solutions are: {solutiona} and {solutiona}''' )
if __name__ == "__main__":
main()
| 691 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__snake_case : Optional[Any] = 12_8022
__snake_case : List[str] = 12_8028
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MaMaaaTokenizer
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
_a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_a = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertEqual(snake_case_ , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
_a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_a = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer.get_vocab()
self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
# fmt: off
_a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
_a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(snake_case_ )
_a = MaMaaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
| 691 | 1 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
__snake_case : Union[str, Any] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class A ( unittest.TestCase ):
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
_a = TOKEN
HfFolder.save_token(snake_case_ )
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def __lowerCAmelCase ( self ) -> Any:
_a = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("test-config" , use_auth_token=self._token )
_a = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case_ , repo_id="test-config" , push_to_hub=snake_case_ , use_auth_token=self._token )
_a = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
_a = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case_ , repo_id="valid_org/test-config-org" , push_to_hub=snake_case_ , use_auth_token=self._token )
_a = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
def __lowerCAmelCase ( self ) -> Any:
CustomConfig.register_for_auto_class()
_a = CustomConfig(attribute=4_2 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
_a = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=snake_case_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 4_2 )
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_a = c.n_embd + 1 # int
_a = c.resid_pdrop + 1.0 # float
_a = not c.scale_attn_weights # bool
_a = c.summary_type + "foo" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(snake_case_ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(snake_case_ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(snake_case_ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(snake_case_ , c.summary_type , "mismatch for key: summary_type" )
def __lowerCAmelCase ( self ) -> List[str]:
_a = PretrainedConfig()
_a = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
snake_case_ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
_a = [key for key, value in config_common_kwargs.items() if value == getattr(snake_case_ , snake_case_ )]
if len(snake_case_ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F''' {', '.join(snake_case_ )}.''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with self.assertRaises(snake_case_ ):
# config is in subfolder, the following should not work without specifying the subfolder
_a = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
_a = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
# A mock response for an HTTP head request to emulate server down
_a = mock.Mock()
_a = 5_0_0
_a = {}
_a = HTTPError
_a = {}
# Download this model to make sure it's in the cache.
_a = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case_ ) as mock_head:
_a = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This checks that we did call the fake head request
mock_head.assert_called()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
_a = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def __lowerCAmelCase ( self ) -> Tuple:
_a = AutoConfig.from_pretrained("bert-base-cased" )
_a = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(snake_case_ )
_a = 2
json.dump(configuration.to_dict() , open(os.path.join(snake_case_ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_a = AutoConfig.from_pretrained(snake_case_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_a = ["config.42.0.0.json"]
_a = 7_6_8
configuration.save_pretrained(snake_case_ )
shutil.move(os.path.join(snake_case_ , "config.4.0.0.json" ) , os.path.join(snake_case_ , "config.42.0.0.json" ) )
_a = AutoConfig.from_pretrained(snake_case_ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def __lowerCAmelCase ( self ) -> Tuple:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_a = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
_a = "v4.0.0"
_a , _a = new_transformers.models.auto.AutoConfig.from_pretrained(
snake_case_ , return_unused_kwargs=snake_case_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(snake_case_ , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_a = "v3.0.0"
_a = old_transformers.models.auto.AutoConfig.from_pretrained(snake_case_ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 691 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """wav2vec2"""
def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
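# Hedged demo (added for illustration, not part of the original file): the
# property above multiplies the feature encoder's conv strides together. With
# the default conv_stride of (5, 2, 2, 2, 2, 2, 2) from __init__, one output
# frame spans 5 * 2**6 = 320 input samples, i.e. 20 ms of 16 kHz audio.
if __name__ == "__main__":
    print(functools.reduce(operator.mul , (5, 2, 2, 2, 2, 2, 2) , 1 ))  # -> 320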
| 691 | 1 |
'''simple docstring'''
def selection_sort ( collection ):
    # In-place selection sort: move the smallest remaining element to the
    # front of the unsorted suffix on each pass.
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 691 |
'''simple docstring'''
def set_bit ( number : int, position : int ):
    return number | (1 << position)
def clear_bit ( number : int, position : int ):
    return number & ~(1 << position)
def flip_bit ( number : int, position : int ):
    return number ^ (1 << position)
def is_bit_set ( number : int, position : int ):
    return ((number >> position) & 1) == 1
def get_bit ( number : int, position : int ):
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
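    # Hedged demo (added for illustration): exercising the helpers above on
    # 0b1010 = 10, counting bit positions from the least-significant bit.
    assert set_bit(0b1010 , 0 ) == 0b1011
    assert clear_bit(0b1010 , 1 ) == 0b1000
    assert flip_bit(0b1010 , 2 ) == 0b1110
    assert is_bit_set(0b1010 , 3 ) is True
    assert get_bit(0b1010 , 0 ) == 0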
| 691 | 1 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks ( lowerCamelCase__ : Dict[str, torch.Tensor] ):
_a = []
_a = []
_a = []
for rt in rc.restypes:
_a = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
_a = {name: i for i, name in enumerate(lowerCamelCase__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
_a = torch.tensor(
lowerCamelCase__, dtype=torch.intaa, device=protein["aatype"].device, )
_a = torch.tensor(
lowerCamelCase__, dtype=torch.intaa, device=protein["aatype"].device, )
_a = torch.tensor(
lowerCamelCase__, dtype=torch.floataa, device=protein["aatype"].device, )
_a = protein["aatype"].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
_a = restype_atomaa_to_atomaa[protein_aatype]
_a = restype_atomaa_mask[protein_aatype]
_a = residx_atomaa_mask
_a = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
_a = restype_atomaa_to_atomaa[protein_aatype]
_a = residx_atomaa_to_atomaa.long()
# create the corresponding mask
_a = torch.zeros([21, 37], dtype=torch.floataa, device=protein["aatype"].device )
for restype, restype_letter in enumerate(rc.restypes ):
_a = rc.restype_atoa[restype_letter]
_a = rc.residue_atoms[restype_name]
for atom_name in atom_names:
_a = rc.atom_order[atom_name]
_a = 1
_a = restype_atomaa_mask[protein_aatype]
_a = residx_atomaa_mask
return protein
def make_atomaa_masks_np ( batch : Dict[str, torch.Tensor] ):
    batch = tree_map(lambda n : torch.tensor(n , device=batch["aatype"].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t : np.array(t ) , make_atomaa_masks(batch ) )
    return out
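# Hedged usage sketch (added for illustration; the output key names follow the
# upstream OpenFold convention and are assumptions here, since this file's
# identifiers were renamed):
# protein = {"aatype": torch.zeros(8 , dtype=torch.long )}  # 8 residues of restype 0
# protein = make_atomaa_masks(protein )
# protein["atom14_atom_exists"]        # float mask, shape [8, 14]
# protein["residx_atom14_to_atom37"]   # long index map, shape [8, 14]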
| 691 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field ( input_text , convert_value=None , default=None , error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options ( input_text , options=[] , convert_value=None , default_choice=0 ):
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment ( value ):
    value = int(value )
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _convert_distributed_mode ( value ):
    value = int(value )
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _convert_dynamo_backend ( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision ( value ):
    value = int(value )
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _convert_sagemaker_distributed_mode ( value ):
    value = int(value )
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _convert_yes_no_to_bool ( value ):
    return {"yes": True, "no": False}[value.lower()]
class A ( argparse.RawDescriptionHelpFormatter ):
    def _format_usage ( self , usage , actions , groups , prefix ):
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace("<command> [<args>] " , "" )
        return usage
| 691 | 1 |
'''simple docstring'''
import argparse
__snake_case : Union[str, Any] = "docs/source/_static/js/custom.js"
def _lowercase ( lowerCamelCase__ : str ):
with open(lowerCamelCase__, encoding="utf-8", newline="\n" ) as f:
_a = f.readlines()
_a = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion =" ):
index += 1
_a = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {" ):
index += 1
# We go until the end
while not lines[index].startswith("}" ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(lowerCamelCase__, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(lowerCamelCase__ )
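# Hedged illustration (added note): the scanner above assumes custom.js contains
# lines shaped like the following; the concrete versions are made-up examples.
#   const stableVersion = "v4.0.0"
#   const versionMapping = {
#       "main": "main",
#       "v4.0.0": "v4.0.0",
#   }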
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
| 691 |
'''simple docstring'''
def simplify ( lowerCamelCase__ : list[list] ):
_a = current_set.copy()
for row_index, row in enumerate(lowerCamelCase__ ):
_a = row[0]
for column_index, column in enumerate(lowerCamelCase__ ):
if magnitude == 0:
_a = column
continue
_a = column / magnitude
# Subtract to cancel term
_a = current_set[0]
_a = [first_row]
_a = current_set[1::]
for row in current_set:
_a = []
        # If the first term is 0, the row is already in the form we want, so preserve it
if row[0] == 0:
final_set.append(lowerCamelCase__ )
continue
for column_index in range(len(lowerCamelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a = final_set[0]
_a = []
_a = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a = simplify(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, lowerCamelCase__ )
_a = resultant
return final_set
def solve_simultaneous ( lowerCamelCase__ : list[list] ):
if len(lowerCamelCase__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
_a = len(lowerCamelCase__ ) + 1
if any(len(lowerCamelCase__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(lowerCamelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
_a = equations.copy()
if any(0 in row for row in data_set ):
_a = data_set.copy()
_a = []
for row_index, row in enumerate(lowerCamelCase__ ):
if 0 not in row:
_a = data_set.pop(lowerCamelCase__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0, lowerCamelCase__ )
_a = data_set.copy()
_a = simplify(lowerCamelCase__ )
_a = simplified[::-1]
_a = []
for row in simplified:
_a = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a = row.copy()[: len(lowerCamelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase__ ) == 0:
solutions.append(0 )
continue
_a = temp_row[1::]
_a = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase__ )
_a = []
for item in solutions:
final.append(float(round(lowerCamelCase__, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
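    # Hedged note (added): each equation in the symmetric system above reads
    # (x1 + ... + x5) + x_i = rhs, and the right-hand sides sum to 30, so the
    # unknowns sum to 5 and the exact solutions are [-1.0, 0.0, 1.0, 2.0, 3.0];
    # the second call solves 4x = 2, i.e. [0.5].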
| 691 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A ( unittest.TestCase ):
__UpperCAmelCase : Optional[int] = MODEL_FOR_CAUSAL_LM_MAPPING
__UpperCAmelCase : Tuple = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
_a = text_generator("This is a test" , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
_a = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
snake_case_ , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
_a = text_generator("This is a test" , do_sample=snake_case_ , num_return_sequences=2 , return_tensors=snake_case_ )
self.assertEqual(
snake_case_ , [
{"generated_token_ids": ANY(snake_case_ )},
{"generated_token_ids": ANY(snake_case_ )},
] , )
_a = text_generator.model.config.eos_token_id
_a = "<pad>"
_a = text_generator(
["This is a test", "This is a second test"] , do_sample=snake_case_ , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case_ , )
self.assertEqual(
snake_case_ , [
[
{"generated_token_ids": ANY(snake_case_ )},
{"generated_token_ids": ANY(snake_case_ )},
],
[
{"generated_token_ids": ANY(snake_case_ )},
{"generated_token_ids": ANY(snake_case_ )},
],
] , )
@require_tf
def __lowerCAmelCase ( self ) -> Any:
_a = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
_a = text_generator("This is a test" , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
_a = text_generator(["This is a test", "This is a second test"] , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> str:
_a = TextGenerationPipeline(model=snake_case_ , tokenizer=snake_case_ )
return text_generator, ["This is a test", "Another test"]
def __lowerCAmelCase ( self ) -> Dict:
_a = "Hello I believe in"
_a = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
_a = text_generator(snake_case_ )
self.assertEqual(
snake_case_ , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
_a = text_generator(snake_case_ , stop_sequence=" fe" )
self.assertEqual(snake_case_ , [{"generated_text": "Hello I believe in fe"}] )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> List[Any]:
_a = text_generator.model
_a = text_generator.tokenizer
_a = text_generator("This is a test" )
self.assertEqual(snake_case_ , [{"generated_text": ANY(snake_case_ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_a = text_generator("This is a test" , return_full_text=snake_case_ )
self.assertEqual(snake_case_ , [{"generated_text": ANY(snake_case_ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_a = pipeline(task="text-generation" , model=snake_case_ , tokenizer=snake_case_ , return_full_text=snake_case_ )
_a = text_generator("This is a test" )
self.assertEqual(snake_case_ , [{"generated_text": ANY(snake_case_ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_a = text_generator("This is a test" , return_full_text=snake_case_ )
self.assertEqual(snake_case_ , [{"generated_text": ANY(snake_case_ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_a = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
[{"generated_text": ANY(snake_case_ )}, {"generated_text": ANY(snake_case_ )}],
[{"generated_text": ANY(snake_case_ )}, {"generated_text": ANY(snake_case_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_a = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
[{"generated_text": ANY(snake_case_ )}, {"generated_text": ANY(snake_case_ )}],
[{"generated_text": ANY(snake_case_ )}, {"generated_text": ANY(snake_case_ )}],
] , )
with self.assertRaises(snake_case_ ):
_a = text_generator("test" , return_full_text=snake_case_ , return_text=snake_case_ )
with self.assertRaises(snake_case_ ):
_a = text_generator("test" , return_full_text=snake_case_ , return_tensors=snake_case_ )
with self.assertRaises(snake_case_ ):
_a = text_generator("test" , return_text=snake_case_ , return_tensors=snake_case_ )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_a = text_generator("" )
self.assertEqual(snake_case_ , [{"generated_text": ANY(snake_case_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_a = text_generator("" )
if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without fancy
            # calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_a = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 5_0_0 , max_new_tokens=2_0 )
_a = text_generator("This is a test" * 5_0_0 , handle_long_generation="hole" , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(snake_case_ ):
text_generator(
"This is a test" * 5_0_0 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self ) -> Optional[int]:
import torch
# Classic `model_kwargs`
_a = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_a = pipe("This is a test" )
self.assertEqual(
snake_case_ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
        # Upgraded those two to real pipeline arguments (they are simply forwarded to the model, as they're unlikely to mean anything else.)
_a = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_a = pipe("This is a test" )
self.assertEqual(
snake_case_ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_a = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_a = pipe("This is a test" )
self.assertEqual(
snake_case_ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def __lowerCAmelCase ( self ) -> str:
import torch
_a = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self ) -> Optional[int]:
import torch
_a = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=snake_case_ , top_p=0.5 )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = "Hello world"
_a = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
_a = logging.get_logger("transformers.generation.tf_utils" )
else:
_a = logging.get_logger("transformers.generation.utils" )
_a = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(snake_case_ ) as cl:
_a = text_generator(snake_case_ , max_length=1_0 , max_new_tokens=1 )
self.assertIn(snake_case_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(snake_case_ ) as cl:
_a = text_generator(snake_case_ , max_new_tokens=1 )
self.assertNotIn(snake_case_ , cl.out )
with CaptureLogger(snake_case_ ) as cl:
_a = text_generator(snake_case_ , max_length=1_0 )
self.assertNotIn(snake_case_ , cl.out )
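# Hedged sketch (added for illustration, mirroring the test above):
# with CaptureLogger(logging.get_logger("transformers.generation.utils" ) ) as cl:
#     text_generator("Hello world" , max_length=1_0 , max_new_tokens=1 )
# assert "Both `max_new_tokens`" in cl.out  # setting both triggers the warning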
| 691 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum ( x ): # picklable for multiprocessing
    return x.sum()
def add_one ( i ): # picklable for multiprocessing
    return i + 1
@dataclass
class A :
    x : int
    y : str
class A ( TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
        class Foo :
            my_attr = """bar"""
        foo = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc ( iterable_length , num_proc , expected_num_proc ):
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {F'''{i}''': i for i in range(iterable_length )}
        _a = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A ( TestCase ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
_a = layers.Dense(2 )
def gen_random_output():
_a = tf.random.uniform((1, 3) )
return model(snake_case_ ).numpy()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
_a = torch.nn.Linear(3 , 2 )
_a = torch.rand(1 , 3 )
return model(snake_case_ ).detach().numpy()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a = gen_random_output()
with temp_seed(4_2 ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def test_nested_data_structure_data ( input_data ):
    output_data = NestedDataStructure(input_data ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def test_flatten ( data , expected_output ):
    output = NestedDataStructure(data ).flatten()
assert output == expected_output
def test_asdict ( ):
    input = A(x=1 , y="foobar" )
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input ) == expected_output
    input = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="foo" )] )
def _split_text ( text : str ):
    return text.split()
def _aseconds_generator_of_aitems_with_timing ( content ):
    yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def test_iflatmap_unordered ( ):
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
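# Hedged sketch (added for illustration): the temp_seed contract the tests above
# rely on: the same seed reproduces the same draws, and the global RNG state is
# restored on exit.
# with temp_seed(4_2 ):
#     first = np.random.rand(1 , 3 )
# with temp_seed(4_2 ):
#     second = np.random.rand(1 , 3 )
# np.testing.assert_equal(first , second )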
| 691 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image :
        @staticmethod
        def open ( *args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A ( unittest.TestCase ):
__UpperCAmelCase : Optional[Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
_a = ObjectDetectionPipeline(model=snake_case_ , image_processor=snake_case_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Tuple:
_a = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(snake_case_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
snake_case_ , {
"score": ANY(snake_case_ ),
"label": ANY(snake_case_ ),
"box": {"xmin": ANY(snake_case_ ), "ymin": ANY(snake_case_ ), "xmax": ANY(snake_case_ ), "ymax": ANY(snake_case_ )},
} , )
import datasets
_a = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_a = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
_a = object_detector(snake_case_ , threshold=0.0 )
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for outputs in batch_outputs:
self.assertGreater(len(snake_case_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
snake_case_ , {
"score": ANY(snake_case_ ),
"label": ANY(snake_case_ ),
"box": {"xmin": ANY(snake_case_ ), "ymin": ANY(snake_case_ ), "xmax": ANY(snake_case_ ), "ymax": ANY(snake_case_ )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __lowerCAmelCase ( self ) -> Optional[int]:
pass
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = "hf-internal-testing/tiny-detr-mobilenetsv3"
_a = AutoModelForObjectDetection.from_pretrained(snake_case_ )
_a = AutoFeatureExtractor.from_pretrained(snake_case_ )
_a = ObjectDetectionPipeline(model=snake_case_ , feature_extractor=snake_case_ )
_a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
] , )
_a = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
],
[
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
],
] , )
@require_torch
@slow
def __lowerCAmelCase ( self ) -> List[str]:
_a = "facebook/detr-resnet-50"
_a = AutoModelForObjectDetection.from_pretrained(snake_case_ )
_a = AutoFeatureExtractor.from_pretrained(snake_case_ )
_a = ObjectDetectionPipeline(model=snake_case_ , feature_extractor=snake_case_ )
_a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"score": 0.9_982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
] , )
_a = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{"score": 0.9_982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
],
[
{"score": 0.9_982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
],
] , )
@require_torch
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = "facebook/detr-resnet-50"
_a = pipeline("object-detection" , model=snake_case_ )
_a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"score": 0.9_982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
] , )
_a = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{"score": 0.9_982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
],
[
{"score": 0.9_982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
],
] , )
@require_torch
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = 0.9_985
_a = "facebook/detr-resnet-50"
_a = pipeline("object-detection" , model=snake_case_ )
_a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=snake_case_ )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"score": 0.9_988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCAmelCase ( self ) -> List[str]:
_a = "Narsil/layoutlmv3-finetuned-funsd"
_a = 0.9_993
_a = pipeline("object-detection" , model=snake_case_ , threshold=snake_case_ )
_a = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"score": 0.9_993, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}},
{"score": 0.9_993, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}},
] , )
| 691 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__snake_case )
class A ( PretrainedConfig ):
__UpperCAmelCase : Dict = """rag"""
__UpperCAmelCase : Dict = True
def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]:
super().__init__(
bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_a = kwargs.pop("question_encoder" )
_a = question_encoder_config.pop("model_type" )
_a = kwargs.pop("generator" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = reduce_loss
_a = label_smoothing
_a = exclude_bos_score
_a = do_marginalize
_a = title_sep
_a = doc_sep
_a = n_docs
_a = max_combined_length
_a = dataset
_a = dataset_split
_a = index_name
_a = retrieval_vector_size
_a = retrieval_batch_size
_a = passages_path
_a = index_path
_a = use_dummy_dataset
_a = output_retrieved
_a = do_deduplication
_a = use_cache
if self.forced_eos_token_id is None:
_a = getattr(self.generator , "forced_eos_token_id" , snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = copy.deepcopy(self.__dict__ )
_a = self.question_encoder.to_dict()
_a = self.generator.to_dict()
_a = self.__class__.model_type
return output
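# Hedged usage sketch (added for illustration; in upstream transformers the
# classmethod above is named `from_question_encoder_generator_configs`):
# from transformers import AutoConfig, RagConfig
# rag_config = RagConfig.from_question_encoder_generator_configs(
#     AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base" ),
#     AutoConfig.from_pretrained("facebook/bart-large" ),
# )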
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree :
    def __init__( self , size ) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )] # flag for lazy update
    def left ( self , idx ) -> int:
        return idx * 2
    def right ( self , idx ) -> int:
        return idx * 2 + 1
    def build ( self , idx , left_element , right_element , a ) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update ( self , idx , left_element , right_element , a , b , val ) -> bool:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
    def query ( self , idx , left_element , right_element , a , b ) -> int | float:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )
    def __str__( self ) -> str:
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
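    # Hedged note (added): for the array above, the three queries print 7, 14
    # and 15; after update(1..3 -> 111) the full-range query prints 111, and the
    # final print(segt) lists the per-index maxima
    # [111, 111, 111, 7, 3, -5, 235, 235, -20, 9, 14, 15, 5, 2, -8].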
| 691 |
'''simple docstring'''
class A :
def __init__( self ) -> List[str]:
_a = 0
_a = 0
_a = {}
def __lowerCAmelCase ( self , snake_case_ ) -> int:
if vertex not in self.adjacency:
_a = {}
self.num_vertices += 1
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
self.add_vertex(snake_case_ )
self.add_vertex(snake_case_ )
if head == tail:
return
_a = weight
_a = weight
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case_ ) ):
_a = list(edges[i] )
edges.sort(key=lambda snake_case_ : e[2] )
for i in range(len(snake_case_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_a = edges[i][2] + 1
for edge in edges:
_a , _a , _a = edge
_a = weight
_a = weight
def __str__( self ) -> Optional[int]:
_a = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_a = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowerCAmelCase ( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any:
_a = Graph()
if vertices is None:
_a = []
if edges is None:
_a = []
for vertex in vertices:
g.add_vertex(snake_case_ )
for edge in edges:
g.add_edge(*snake_case_ )
return g
class A :
def __init__( self ) -> Optional[int]:
_a = {}
_a = {}
def __len__( self ) -> List[Any]:
return len(self.parent )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
if item in self.parent:
return self.find(snake_case_ )
_a = item
_a = 0
return item
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(snake_case_ )
if item != self.parent[item]:
_a = self.find(self.parent[item] )
return self.parent[item]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]:
_a = self.find(snake_case_ )
_a = self.find(snake_case_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_a = roota
return roota
if self.rank[roota] < self.rank[roota]:
_a = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_a = roota
return roota
return None
@staticmethod
def __lowerCAmelCase ( snake_case_ ) -> Tuple:
_a = graph.num_vertices
_a = Graph.UnionFind()
_a = []
while num_components > 1:
_a = {}
for vertex in graph.get_vertices():
_a = -1
_a = graph.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for edge in edges:
_a , _a , _a = edge
_a = union_find.find(snake_case_ )
_a = union_find.find(snake_case_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_a , _a , _a = cheap_edge[vertex]
if union_find.find(snake_case_ ) != union_find.find(snake_case_ ):
union_find.union(snake_case_ , snake_case_ )
mst_edges.append(cheap_edge[vertex] )
_a = num_components - 1
_a = Graph.build(edges=snake_case_ )
return mst
| 691 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__snake_case : Optional[Any] = "Create a default config file for Accelerate with only a few flags set."
def _lowercase ( lowerCamelCase__ : List[Any]="no", lowerCamelCase__ : str = default_json_config_file, lowerCamelCase__ : bool = False ):
_a = Path(lowerCamelCase__ )
path.parent.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
_a = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
_a = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
_a = torch.cuda.device_count()
_a = num_gpus
_a = False
if num_gpus > 1:
_a = "MULTI_GPU"
else:
_a = "NO"
elif is_xpu_available() and use_xpu:
_a = torch.xpu.device_count()
_a = num_xpus
_a = False
if num_xpus > 1:
_a = "MULTI_XPU"
else:
_a = "NO"
elif is_npu_available():
_a = torch.npu.device_count()
_a = num_npus
_a = False
if num_npus > 1:
_a = "MULTI_NPU"
else:
_a = "NO"
else:
_a = 0
_a = True
_a = 1
_a = "NO"
_a = ClusterConfig(**lowerCamelCase__ )
config.to_json_file(lowerCamelCase__ )
return path
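# Hedged illustration (added; exact values depend on the host machine): on a
# single-GPU box, write_basic_config(mixed_precision="no") writes roughly
# {"compute_environment": "LOCAL_MACHINE", "mixed_precision": "no",
#  "num_processes": 1, "use_cpu": false, "distributed_type": "NO"}
# to default_json_config_file.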
def default_command_parser ( parser , parents ):
    parser = parser.add_parser("default" , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        "--config_file" , default=default_json_config_file , help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , dest="save_location" , )
    parser.add_argument(
        "--mixed_precision" , choices=["no", "fp16", "bf16"] , type=str , help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command ( args ):
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(F'''accelerate configuration saved at {config_file}''' )
| 691 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case : Tuple = "\\n Text data.\n Second line of data."
__snake_case : int = "file"
@pytest.fixture(scope="session" )
def zstd_path ( tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file ( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def test_cached_path_extract ( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = "custom_cache"
_a = "custom_extracted_dir"
_a = tmp_path / "custom_extracted_path"
if default_extracted:
_a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) )
_a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a = xz_file
_a = (
DownloadConfig(extract_compressed_file=lowerCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ )
)
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
# absolute path
_a = str(Path(lowerCamelCase__ ).resolve() )
assert cached_path(lowerCamelCase__ ) == text_file
# relative path
_a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCamelCase__ ) == text_file
def _lowercase ( lowerCamelCase__ : Dict ):
# absolute path
_a = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
# relative path
_a = "./__missing_file__.txt"
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
| 691 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A ( a ):
__UpperCAmelCase : Optional[int] = ["""image_processor""", """tokenizer"""]
__UpperCAmelCase : Optional[int] = """ViTImageProcessor"""
__UpperCAmelCase : Dict = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ) -> Any:
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , snake_case_ , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(snake_case_ , snake_case_ )
def __call__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ ) -> Optional[int]:
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
_a = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if visual_prompt is not None:
_a = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if images is not None:
_a = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if visual_prompt is not None and images is not None:
_a = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_a = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> Optional[Any]:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> Tuple:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def __lowerCAmelCase ( self ) -> List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , snake_case_ , )
return self.image_processor_class
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , snake_case_ , )
return self.image_processor
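# Hypothetical usage sketch of the processor above (the class is named `A` in
# this file; the checkpoint id below is an assumption, not from the source):
#
# from PIL import Image
# processor = A.from_pretrained("CIDAS/clipseg-rd64-refined")
# image = Image.open("cat.png")
# enc = processor(text=["a cat"], images=image, return_tensors="pt")
# # -> BatchEncoding with input_ids / attention_mask plus pixel_values
# enc = processor(visual_prompt=image, images=image, return_tensors="pt")
# # -> dict with pixel_values and conditional_pixel_values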
| 691 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__snake_case : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = {}
state_dict.pop("pixel_mean", lowerCamelCase__ )
state_dict.pop("pixel_std", lowerCamelCase__ )
_a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a = key.replace(lowerCamelCase__, lowerCamelCase__ )
if re.match(lowerCamelCase__, lowerCamelCase__ ):
_a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) )
if layer_nb == 0:
_a = key.replace("layers.0", "proj_in" )
elif layer_nb == 1:
_a = key.replace("layers.1", "layers.0" )
elif layer_nb == 2:
_a = key.replace("layers.2", "proj_out" )
_a = value
_a = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ):
_a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_a = SamConfig()
elif "sam_vit_l" in model_name:
_a = SamVisionConfig(
hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
elif "sam_vit_h" in model_name:
_a = SamVisionConfig(
hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
_a = torch.load(lowerCamelCase__, map_location="cpu" )
_a = replace_keys(lowerCamelCase__ )
_a = SamImageProcessor()
_a = SamProcessor(image_processor=lowerCamelCase__ )
_a = SamModel(lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
_a = hf_model.to("cuda" )
_a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" )
_a = [[[400, 650]]]
_a = [[1]]
_a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
_a = ((75, 275, 1_725, 850),)
_a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
_a = [[[400, 650], [800, 650]]]
_a = [[1, 1]]
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
__snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__snake_case : str = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
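# Standalone illustration of the hypernetwork-MLP renaming performed in
# replace_keys above, re-implemented for a single key (not the source function):
#
# import re
# key = "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight"
# pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
# layer_nb = int(re.match(pattern, key).group(2))
# if layer_nb == 2:
#     key = key.replace("layers.2", "proj_out")
# assert key == "mask_decoder.output_hypernetworks_mlps.0.proj_out.weight"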
| 691 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
_a = 1_0
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = [1, 2, 3, 4]
_a = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(snake_case_ , self.block_size , 0 ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
_a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
_a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(snake_case_ , self.block_size , 0 ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
_a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(snake_case_ , self.block_size , 0 ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
_a , _a = process_story(snake_case_ )
self.assertEqual(snake_case_ , [] )
def __lowerCAmelCase ( self ) -> Dict:
_a = ""
_a , _a = process_story(snake_case_ )
self.assertEqual(snake_case_ , [] )
self.assertEqual(snake_case_ , [] )
def __lowerCAmelCase ( self ) -> Dict:
_a = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
_a , _a = process_story(snake_case_ )
_a = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(snake_case_ , snake_case_ )
_a = ["It was the best of times."]
self.assertEqual(snake_case_ , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = torch.tensor([1, 2, 3, 4] )
_a = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(snake_case_ , 0 ).numpy() , expected.numpy() )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
_a = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(snake_case_ , 2_3 ).numpy() , expected.numpy() )
def __lowerCAmelCase ( self ) -> List[str]:
_a = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_a = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(snake_case_ , 1 ).numpy() , expected.numpy() )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = 1_0_1
_a = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
_a = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_a = compute_token_type_ids(snake_case_ , snake_case_ )
np.testing.assert_array_equal(snake_case_ , snake_case_ )
| 691 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict=0.9_99, lowerCamelCase__ : Union[str, Any]="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase__ : List[Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase__ : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
_a = []
for i in range(lowerCamelCase__ ):
_a = i / num_diffusion_timesteps
_a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ), lowerCamelCase__ ) )
return torch.tensor(lowerCamelCase__, dtype=torch.floataa )
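# Worked form of the schedule above: with the "cosine" transform,
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, each entry is
#     beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)
# for i in 0..T-1, i.e. the per-step drop in the cumulative alpha product,
# clipped at max_beta (0.999 by default).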
class A ( a , a ):
__UpperCAmelCase : int = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Optional[int] = 2
@register_to_config
def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]:
if trained_betas is not None:
_a = torch.tensor(snake_case_ , dtype=torch.floataa )
elif beta_schedule == "linear":
_a = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a = betas_for_alpha_bar(snake_case_ )
else:
raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
_a = 1.0 - self.betas
_a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case_ , snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Dict:
if schedule_timesteps is None:
_a = self.timesteps
_a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a = 1 if len(snake_case_ ) > 1 else 0
else:
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
_a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
_a = self.index_for_timestep(snake_case_ )
if self.state_in_first_order:
_a = self.sigmas[step_index]
else:
_a = self.sigmas_interpol[step_index]
_a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Union[str, Any]:
_a = num_inference_steps
_a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_a = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_a = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a = torch.from_numpy(np.log(snake_case_ ) ).to(snake_case_ )
_a = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ )
_a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a = torch.from_numpy(snake_case_ ).to(device=snake_case_ )
# interpolate sigmas
_a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(snake_case_ ).startswith("mps" ):
# mps does not support float64
_a = torch.from_numpy(snake_case_ ).to(snake_case_ , dtype=torch.floataa )
else:
_a = torch.from_numpy(snake_case_ ).to(snake_case_ )
# interpolate timesteps
_a = self.sigma_to_t(snake_case_ ).to(snake_case_ , dtype=timesteps.dtype )
_a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_a = torch.cat([timesteps[:1], interleaved_timesteps] )
_a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a = defaultdict(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
# get log sigma
_a = sigma.log()
# get distribution
_a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_a = low_idx + 1
_a = self.log_sigmas[low_idx]
_a = self.log_sigmas[high_idx]
# interpolate sigmas
_a = (low - log_sigma) / (low - high)
_a = w.clamp(0 , 1 )
# transform interpolation to time range
_a = (1 - w) * low_idx + w * high_idx
_a = t.view(sigma.shape )
return t
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return self.sample is None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]:
_a = self.index_for_timestep(snake_case_ )
# advance index counter by 1
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a = self.sigmas[step_index]
_a = self.sigmas_interpol[step_index + 1]
_a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_a = self.sigmas[step_index - 1]
_a = self.sigmas_interpol[step_index]
_a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a = 0
_a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a = sigma_interpol - sigma_hat
# store for 2nd order step
_a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_a = sigma_next - sigma_hat
_a = self.sample
_a = None
_a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ):
# mps does not support float64
_a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_a = self.timesteps.to(original_samples.device )
_a = timesteps.to(original_samples.device )
_a = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps]
_a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a = sigma.unsqueeze(-1 )
_a = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> str:
return self.config.num_train_timesteps
| 691 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__snake_case : int = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__snake_case : Optional[int] = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
__snake_case : str = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> List[str]:
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ = CHRF.CHAR_ORDER , snake_case_ = CHRF.WORD_ORDER , snake_case_ = CHRF.BETA , snake_case_ = False , snake_case_ = False , snake_case_ = False , ) -> Dict:
_a = len(references[0] )
if any(len(snake_case_ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
_a = [[refs[i] for refs in references] for i in range(snake_case_ )]
_a = CHRF(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_a = sb_chrf.corpus_score(snake_case_ , snake_case_ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
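# Sketch of the reference transposition performed above: sacrebleu expects one
# list per reference *position*, not one list per prediction.
#
# references = [["ref a1", "ref a2"], ["ref b1", "ref b2"]]   # per prediction
# transformed = [[refs[i] for refs in references] for i in range(2)]
# transformed == [["ref a1", "ref b1"], ["ref a2", "ref b2"]]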
| 691 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index], lowerCamelCase__, lowerCamelCase__ ):
# Color current vertex
_a = i
# Validate coloring
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, index + 1 ):
return True
# Backtrack
_a = -1
return False
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int ):
_a = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, 0 ):
return colored_vertices
return []
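# Example run of the backtracking colorer above (the top-level functions are all
# renamed `_lowercase` in this file): a 4-cycle is 2-colorable.
#
# graph = [
#     [0, 1, 0, 1],
#     [1, 0, 1, 0],
#     [0, 1, 0, 1],
#     [1, 0, 1, 0],
# ]
# color(graph, 2) -> [0, 1, 0, 1]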
| 691 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : str ):
    _a = [int(i ) for i in ip_va_address.split("." ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )
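# Quick checks for the validator above (note the <= 254 upper bound it enforces):
# is_ip_va_address_valid("192.168.0.1")  -> True
# is_ip_va_address_valid("255.0.0.1")    -> False  (255 exceeds the 254 cap)
# is_ip_va_address_valid("192.168.1")    -> False  (only three octets)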
if __name__ == "__main__":
__snake_case : int = input().strip()
__snake_case : str = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 691 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class A :
def __init__( self , snake_case_ ) -> Optional[int]:
_a = str(id_ )
_a = None
_a = None
_a = []
_a = {} # {vertex:distance}
def __lt__( self , snake_case_ ) -> Optional[Any]:
return self.key < other.key
def __repr__( self ) -> Union[str, Any]:
return self.id
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
self.neighbors.append(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
_a = weight
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : str ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1], lowerCamelCase__ )
graph[b - 1].add_edge(graph[a - 1], lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
_a = []
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = graph[:]
while q:
_a = min(lowerCamelCase__ )
q.remove(lowerCamelCase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
for i in range(1, len(lowerCamelCase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = list(lowerCamelCase__ )
hq.heapify(lowerCamelCase__ )
while h:
_a = hq.heappop(lowerCamelCase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
hq.heapify(lowerCamelCase__ )
for i in range(1, len(lowerCamelCase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _lowercase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 691 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Dict, lowerCamelCase__ : Any ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(lowerCamelCase__, n - 1, lowerCamelCase__ ) * a) % mod
else:
        _a = binary_exponentiation(lowerCamelCase__, n // 2, lowerCamelCase__ )  # integer halving keeps n an int
return (b * b) % mod
# a prime number
__snake_case : List[Any] = 701
__snake_case : str = 10_0000_0000
__snake_case : List[str] = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
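# Added cross-check: b**(p-2) mod p is the modular inverse of b by Fermat's
# little theorem, so the result must agree with Python's three-argument pow:
print(binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p))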
| 691 |
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class A :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ) -> str: # noqa: B008
_a = multiplier
_a = increment
_a = modulo
_a = seed
def __lowerCAmelCase ( self ) -> str:
_a = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
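# The defaults above are the classic "Numerical Recipes" LCG constants:
# multiplier 1664525, increment 1013904223, modulo 2**32 (written 2 << 31).
# Two generators seeded identically produce identical streams, e.g.:
# lcg_a = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31, 42)
# lcg_b = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31, 42)
# lcg_a.next_number() == lcg_b.next_number()  # True for every draw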
| 691 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class A :
__UpperCAmelCase : Tuple = PegasusConfig
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : str = """gelu"""
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=False , snake_case_=9_9 , snake_case_=3_2 , snake_case_=2 , snake_case_=4 , snake_case_=3_7 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=4_0 , snake_case_=2 , snake_case_=1 , snake_case_=0 , ) -> List[str]:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = eos_token_id
_a = pad_token_id
_a = bos_token_id
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_a = tf.concat([input_ids, eos_tensor] , axis=1 )
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_a = prepare_pegasus_inputs_dict(snake_case_ , snake_case_ , snake_case_ )
return config, inputs_dict
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Dict:
_a = TFPegasusModel(config=snake_case_ ).get_decoder()
_a = inputs_dict["input_ids"]
_a = input_ids[:1, :]
_a = inputs_dict["attention_mask"][:1, :]
_a = inputs_dict["head_mask"]
_a = 1
# first forward pass
_a = model(snake_case_ , attention_mask=snake_case_ , head_mask=snake_case_ , use_cache=snake_case_ )
_a , _a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_a = tf.concat([input_ids, next_tokens] , axis=-1 )
_a = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_a = model(snake_case_ , attention_mask=snake_case_ )[0]
_a = model(snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_a = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_a = output_from_no_past[:, -3:, random_slice_idx]
_a = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case_ , snake_case_ , rtol=1E-3 )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : Optional[Any]=None, lowerCamelCase__ : Optional[Any]=None, lowerCamelCase__ : Optional[Any]=None, lowerCamelCase__ : Tuple=None, lowerCamelCase__ : Optional[int]=None, ):
if attention_mask is None:
_a = tf.cast(tf.math.not_equal(lowerCamelCase__, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
_a = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
_a = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
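# Illustrative mask construction mirroring the helper above: positions equal to
# pad_token_id become 0, everything else 1 (values made up for the example):
#
# ids  = tf.constant([[5, 6, 0, 0]])                    # pad_token_id = 0 assumed
# mask = tf.cast(tf.math.not_equal(ids, 0), tf.int8)    # -> [[1, 1, 0, 0]]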
@require_tf
class A ( a , a , unittest.TestCase ):
__UpperCAmelCase : Optional[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__UpperCAmelCase : Any = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__UpperCAmelCase : Tuple = (
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : str = True
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Tuple = False
def __lowerCAmelCase ( self ) -> str:
_a = TFPegasusModelTester(self )
_a = ConfigTester(self , config_class=snake_case_ )
def __lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class A ( unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCAmelCase : str = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__UpperCAmelCase : Any = """google/pegasus-xsum"""
@cached_property
def __lowerCAmelCase ( self ) -> str:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowerCAmelCase ( self , **snake_case_ ) -> Optional[Any]:
_a = self.translate_src_text(**snake_case_ )
assert self.expected_text == generated_words
def __lowerCAmelCase ( self , **snake_case_ ) -> Optional[int]:
_a = self.tokenizer(self.src_text , **snake_case_ , padding=snake_case_ , return_tensors="tf" )
_a = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=snake_case_ , )
_a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case_ )
return generated_words
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._assert_generated_batch_equal_expected()
| 691 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger("transformers.models.encodec")
__snake_case : Tuple = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
__snake_case : int = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
__snake_case : Optional[int] = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
__snake_case : Tuple = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
__snake_case : int = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
__snake_case : Union[str, Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case : Tuple = []
__snake_case : Optional[int] = []
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ):
for attribute in key.split("." ):
_a = getattr(lowerCamelCase__, lowerCamelCase__ )
if weight_type is not None:
_a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
elif weight_type == "weight_ih_l0":
_a = value
elif weight_type == "weight_hh_l0":
_a = value
elif weight_type == "bias_ih_l0":
_a = value
elif weight_type == "bias_hh_l0":
_a = value
elif weight_type == "weight_ih_l1":
_a = value
elif weight_type == "weight_hh_l1":
_a = value
elif weight_type == "bias_ih_l1":
_a = value
elif weight_type == "bias_hh_l1":
_a = value
else:
_a = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
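# Quick illustration of the three wildcard forms handled above (keys made up):
# should_ignore("decoder.model.2.foo", ["decoder.model.2.*"])  -> True  (prefix)
# should_ignore("a.middle.b",          ["a.*.b"])              -> True  (infix)
# should_ignore("encoder.bias",        ["bias"])               -> True  (substring)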
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ):
_a = []
if model_name == "encodec_24khz" or "encodec_32khz":
_a = MAPPING_24K
elif model_name == "encodec_48khz":
_a = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__, lowerCamelCase__ ):
logger.info(F'''{name} was ignored''' )
continue
_a = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
_a = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
_a = True
if "*" in mapped_key:
_a = name.split(lowerCamelCase__ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCamelCase__ )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "weight_ih_l0" in name:
_a = "weight_ih_l0"
elif "weight_hh_l0" in name:
_a = "weight_hh_l0"
elif "bias_ih_l0" in name:
_a = "bias_ih_l0"
elif "bias_hh_l0" in name:
_a = "bias_hh_l0"
elif "weight_ih_l1" in name:
_a = "weight_ih_l1"
elif "weight_hh_l1" in name:
_a = "weight_hh_l1"
elif "bias_ih_l1" in name:
_a = "bias_ih_l1"
elif "bias_hh_l1" in name:
_a = "bias_hh_l1"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
elif "running_mean" in name:
_a = "running_mean"
elif "running_var" in name:
_a = "running_var"
elif "num_batches_tracked" in name:
_a = "num_batches_tracked"
else:
_a = None
set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ):
if config_path is not None:
_a = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
_a = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_a = [8, 5, 4, 4]
_a = [2.2]
_a = 64
_a = 32_000
_a = 2_048
_a = False
_a = False
_a = False
elif model_name == "encodec_48khz":
_a = [8, 5, 4, 2]
_a = [3.0, 6.0, 12.0, 24.0]
_a = 48_000
_a = 2
_a = False
_a = "time_group_norm"
_a = True
_a = 1.0
_a = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_a = EncodecModel(lowerCamelCase__ )
_a = EncodecFeatureExtractor(
feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
feature_extractor.save_pretrained(lowerCamelCase__ )
_a = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_a = original_checkpoint["best_state"]
recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__snake_case : List[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
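If the conversion succeeds, the dumped folder can be loaded back through the public API; a minimal sketch, assuming the run used --pytorch_dump_folder_path ./encodec-32khz-converted (the path is a placeholder, not a file shipped with this script):

    # Illustrative reload of a converted checkpoint; the folder name is a placeholder.
    from transformers import EncodecModel, EncodecFeatureExtractor
    model = EncodecModel.from_pretrained("./encodec-32khz-converted")
    feature_extractor = EncodecFeatureExtractor.from_pretrained("./encodec-32khz-converted")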
| 691 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1153.1833) < 1E-2
        assert abs(result_mean.item() - 0.5005) < 1E-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1E-2
        assert abs(result_mean.item() - 0.3372) < 1E-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1E-2
        assert abs(result_mean.item() - 0.2631) < 1E-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ):
            scheduler.set_timesteps(timesteps=timesteps)
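The batch-step test above relies on DDPMParallelScheduler accepting a vector of timesteps, one per sample; a minimal standalone sketch of that call pattern (shapes and values here are illustrative, not taken from the test harness):

    import torch
    from diffusers import DDPMParallelScheduler

    scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
    sample = torch.randn(3, 4, 8, 8)           # three samples in one flat batch
    timesteps = torch.tensor([999, 998, 997])  # a different timestep per sample
    residual = torch.randn_like(sample)        # stand-in for a model's noise prediction
    prev_sample = scheduler.batch_step_no_noise(residual, timesteps, sample)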
| 691 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
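Under this layout the heavy submodules are only imported when one of their exported names is first touched; a minimal sketch of the observable behavior, assuming the package sits at its usual transformers.models.bloom path:

    # Importing a config class does not pull in torch-backed modeling code yet.
    from transformers.models.bloom import BloomConfig
    config = BloomConfig()
    # Accessing a model class is what triggers the lazy import of modeling_bloom.
    from transformers.models.bloom import BloomModel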
| 691 | 1 |
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
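As a worked check of check_circuit_or_path on the first demo graph: vertices 1 and 5 have odd degree (3 and 1 respectively), so the function returns status 2 with odd_node = 5, and check_euler starts the DFS from that odd vertex; a minimal sketch:

    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    status, odd_node = check_circuit_or_path(g1, 10)
    assert (status, odd_node) == (2, 5)  # exactly two odd-degree vertices -> Euler path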
| 691 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
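The point of a dummy object is that importing it always succeeds, while constructing it without the gated backends raises; a minimal sketch of that behavior, assuming scipy is absent from the environment and that this placeholder stands in for diffusers' scipy-gated scheduler as named above:

    try:
        scheduler = LMSDiscreteScheduler()
    except ImportError as err:
        print(err)  # message lists the missing backends (torch, scipy)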
| 691 | 1 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1_000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.4_85, 0.4_56, 0.4_06], image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1_000, classifier_activation="softmax", )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST )
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0 )
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(F'''Pushing converted {model_name} to the hub...''' )
        model_name = F'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
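After a successful run, the dumped folder can be reloaded through the standard API; a minimal sketch using the script's default --pytorch_dump_folder_path of "hf_model":

    from transformers import EfficientNetForImageClassification, EfficientNetImageProcessor
    model = EfficientNetForImageClassification.from_pretrained("hf_model")
    processor = EfficientNetImageProcessor.from_pretrained("hf_model")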
| 691 |
'''simple docstring'''
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
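Each entry maps a bare package name to its full pip requirement specifier, so version pins live in one place; a minimal sketch of the lookup pattern (the helper name is illustrative, not defined in this table):

    def pinned_requirement(pkg: str) -> str:
        # e.g. pinned_requirement("torch") -> "torch>=1.9,!=1.12.0"
        return deps[pkg]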
| 691 | 1 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset dict into features and targets
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    # Load the Iris dataset and split it into train/test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25 )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
    plt.title("Normalized Confusion Matrix - IRIS Dataset" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
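A natural extension of the demo is to score the held-out split numerically as well; a minimal sketch reusing the helpers defined above (the 0.25 split mirrors main):

    def holdout_accuracy() -> float:
        data = load_iris()
        features, targets = data_handling(data)
        x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
        classifier = xgboost(x_train, y_train)
        return float((classifier.predict(x_test) == y_test).mean())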
| 691 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))
    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))
    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
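The WordpieceTokenizer behavior these cases pin down can be reproduced directly with the same tiny vocabulary; a minimal sketch:

    vocab = {"[UNK]": 0, "un": 1, "##want": 2, "##ed": 3, "runn": 4, "##ing": 5}
    tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
    print(tokenizer.tokenize("unwanted running"))  # ['un', '##want', '##ed', 'runn', '##ing']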
| 691 | 1 |