import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
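# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# The builder above backs `load_dataset("json", ...)`. A minimal example of
# exercising both supported layouts; the file names are hypothetical:
#
#     from datasets import load_dataset
#
#     # One JSON object per line (JSON Lines), parsed in chunks with pyarrow
#     ds = load_dataset("json", data_files="data.jsonl")
#
#     # A single JSON document whose "data" field holds the records
#     ds = load_dataset("json", data_files="data.json", field="data")
# ---------------------------------------------------------------------------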
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
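# A typical invocation, assuming the environment was configured once with
# `accelerate config` (the flag names below match the argparse definitions in
# this script; the file name is illustrative):
#
#     accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2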
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
"""Pure Python implementation of the circle sort algorithm."""


def circle_sort(collection: list) -> list:
    """Sort a mutable collection of comparable items in ascending order."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Compare and swap mirrored pairs, recurse on both halves, report any swap."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
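# Worked example (added for illustration): each pass compares mirrored pairs
# (first/last, second/second-to-last, ...) and recurses on both halves until a
# full pass performs no swap.
#
#     >>> circle_sort([0, 5, 3, 2, 2])
#     [0, 2, 2, 3, 5]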
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
"""
Implementation of gradient descent algorithm for minimizing cost of a linear
hypothesis function.
"""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Calculate the error (hypothesis value - actual output) for a given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Compute the hypothesis (predicted) value for a given input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Return the actual output of the example_no'th example from train/test data."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis value of the example_no'th example from train/test data."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the cost-derivative terms over the dataset; index == -1 denotes the bias term."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Return the partial derivative of the cost w.r.t. the (index + 1)'th parameter."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
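# Note (added for clarity): the loop below performs batch gradient descent for
# the linear hypothesis h(x) = theta_0 + theta_1 * x_1 + ... + theta_n * x_n,
# applying the simultaneous update
#
#     theta_j := theta_j - LEARNING_RATE * (1 / m) * sum_i (h(x^(i)) - y^(i)) * x_j^(i)
#
# where the bias term (j = 0, passed as index == -1 above) uses 1 in place of x_j^(i).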
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
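# Note (added, based on the two functions above): with alpha = cos(theta) and
# sigma = sin(theta), `alpha_sigma_to_t` recovers t = theta / (pi / 2) in [0, 1];
# `get_crash_schedule` warps a uniform t-grid through sigma = sin(t * pi / 2) ** 2
# before mapping back, apparently matching the noise schedule the original
# audio-diffusion checkpoints were sampled with.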
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    diffusion_ema = orig_model.diffusion_ema.eval()
    orig_model_state_dict = diffusion_ema.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(diffusion_ema, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
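# Example invocation (illustrative; uses only the flags defined above):
#
#     python convert_dance_diffusion_to_diffusers.py \
#         --model_path gwf-440k \
#         --checkpoint_path ./gwf-440k-diffusers
#
# Passing one of the official model names downloads the checkpoint first;
# passing a local .ckpt path converts it directly.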
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
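# Usage note (added): with the `_LazyModule` indirection above, submodules are
# imported only on first attribute access, e.g. (assuming a standard
# `transformers` install):
#
#     from transformers import ASTConfig   # cheap: config module only
#     from transformers import ASTModel    # triggers the torch-gated import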
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg_error = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg_error)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
import unittest

from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
from math import pow, sqrt
def validate(*values: float) -> bool:
    """Return True when at least one value is given and all values are positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
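# Graham's law of effusion, which every helper below rearranges
# (added for reference):
#
#     rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
#
# i.e. lighter gases effuse faster, in inverse proportion to the square root
# of their molar masses.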
def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
"""
Naive (brute-force) pattern search: slide the pattern over the text and compare
character by character. Time complexity: O(len(s) * len(pattern)).
"""


def naive_pattern_search(s: str, pattern: str) -> list:
    """Return the list of start positions where `pattern` occurs in `s`."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    r"""Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. 1/255 to map [0, 255] into [0, 1]."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess an image or batch of images into model-ready `pixel_values`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
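# Usage sketch (added for illustration; "cat.png" is a hypothetical file):
#
#     from PIL import Image
#     processor = CLIPImageProcessor()
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) with defaults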
| 465 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096,
        num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1,
        attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02,
        scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
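# Hedged sketch (added): the attribute_map above lets generic code use the
# standardized names, which resolve to the XGLM-specific fields:
#   cfg = XGLMConfig(d_model=512, num_layers=4)
#   assert cfg.hidden_size == 512 and cfg.num_hidden_layers == 4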
| 80 |
"""simple docstring"""
from functools import lru_cache

# Project Euler problem 47: find the first of n (default 4) consecutive
# integers that each have exactly n distinct prime factors.


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n, by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True if every element of the list is equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Return the first run of n consecutive integers with n distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
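    # Hand-checkable examples (added): 14 = 2 * 7 and 15 = 3 * 5 form the first
    # pair of consecutive integers with two distinct prime factors each.
    assert unique_prime_factors(100) == {2, 5}
    assert solution(2) == 14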
| 346 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024,
        coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32,
        max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True,
        text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16,
        classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size,
            hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
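# Hedged sketch (added): layout-specific options are plain keyword arguments on
# the config class above, e.g.
#   cfg = LayoutLMv3Config(max_2d_position_embeddings=1024, visual_embed=True)
#   cfg.input_size, cfg.patch_size  # -> (224, 16) with the defaults above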
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between the question-answering /
        # sequence-classification heads and the other tasks
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # A dummy image is used below, so OCR must not be applied
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
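# Hedged export sketch (added; the exact OnnxConfig constructor/flow is an
# assumption based on the standard transformers.onnx API):
#   onnx_config = LayoutLMv3OnnxConfig(model.config, task="question-answering")
#   dummy = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)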
| 429 |
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class Test(unittest.TestCase):
    def test_component(self) -> None:
        """Test vector components."""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """Test the string representation."""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """Test the vector size."""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """Test the Euclidean length."""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """Test vector addition."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """Test vector subtraction."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """Test scalar multiplication and the dot product."""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual(a * b, 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """Test the a*x + y operation."""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
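# Worked check for test_determinant (added): expanding along the first row,
# det([[1, 2, 3], [2, 4, 5], [6, 7, 8]])
#   = 1 * (4*8 - 5*7) - 2 * (2*8 - 5*6) + 3 * (2*7 - 4*6)
#   = -3 + 28 - 30
#   = -5.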
| 429 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : List[str] , *snake_case_ : str , **snake_case_ : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Any , *snake_case_ : List[str] , **snake_case_ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *snake_case_ : List[str] , **snake_case_ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *snake_case_ : Tuple , **snake_case_ : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : List[Any] , *snake_case_ : Optional[Any] , **snake_case_ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Any , *snake_case_ : Tuple , **snake_case_ : Any ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : List[str] , *snake_case_ : Optional[int] , **snake_case_ : int ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : int , *snake_case_ : Dict , **snake_case_ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Tuple , *snake_case_ : str , **snake_case_ : int ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Tuple , *snake_case_ : Optional[int] , **snake_case_ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : int , *snake_case_ : Union[str, Any] , **snake_case_ : Tuple ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Tuple , *snake_case_ : str , **snake_case_ : int ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Any , *snake_case_ : Optional[Any] , **snake_case_ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Any , *snake_case_ : Optional[int] , **snake_case_ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Optional[int] , *snake_case_ : List[Any] , **snake_case_ : Any ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : int , *snake_case_ : List[str] , **snake_case_ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Dict , *snake_case_ : Any , **snake_case_ : Dict ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Dict , *snake_case_ : int , **snake_case_ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Any , *snake_case_ : str , **snake_case_ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : List[Any] , *snake_case_ : Dict , **snake_case_ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : str , *snake_case_ : List[str] , **snake_case_ : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : List[str] , *snake_case_ : Any , **snake_case_ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Any , *snake_case_ : Any , **snake_case_ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *snake_case_ : Tuple , **snake_case_ : Dict ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Optional[int] , *snake_case_ : List[Any] , **snake_case_ : int ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *snake_case_ : int , **snake_case_ : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : List[Any] , *snake_case_ : Any , **snake_case_ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : int , *snake_case_ : List[str] , **snake_case_ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Any , *snake_case_ : List[str] , **snake_case_ : Dict ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : int , *snake_case_ : Optional[Any] , **snake_case_ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class a ( metaclass=SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = ["""sentencepiece"""]
def __init__( self : Dict , *snake_case_ : Dict , **snake_case_ : Dict ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
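# Behavioral note (added): when sentencepiece is not installed, constructing any
# placeholder class above raises via requires_backends with an installation hint,
# e.g. (hedged sketch)
#   obj = a()  # -> ImportError: ... requires the sentencepiece library ...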
| 347 |
"""Utilities for chunking a layer's computation over its batch dimensions."""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collect the shapes of every tensor in a nested dict/list/tuple tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat batch index into a multi-index for the given batch dims."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
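# Worked example (added): in a batch of shape (2, 3), flat index 5 maps to the
# multi-index (1, 2), since 5 = 1 * 3 + 2.
#   _flat_idx_to_idx(5, (2, 3))  # -> (1, 2)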
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Return an ordered list of slice tuples that exactly cover the flat index
    range [start, end] (inclusive) of a tensor with batch dims `dims`."""

    # start_edges/end_edges mark, per dimension, whether the index sits on the
    # tensor's leading/trailing edge once all later dimensions are also on edge
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
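# Worked example (added): covering flat elements 1..3 of a (2, 3) batch, i.e.
# start=(0, 1) through end=(1, 0) inclusive, needs two slice tuples:
#   (slice(0, 1), slice(1, 3))  # row 0, columns 1-2
#   (slice(1, 2), slice(0, 1))  # row 1, column 0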
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Extract the flat range [flat_start, flat_end) across t's leading batch dims."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run `layer` over `inputs` in chunks of `chunk_size` along the flattened
    leading `no_batch_dims` batch dimensions, then reassemble the output."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
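# Hedged usage sketch (added): chunking a toy layer over two batch dims.
#   def layer(x: torch.Tensor) -> torch.Tensor:
#       return x * 2
#   x = torch.ones(4, 8, 16)  # batch dims (4, 8), feature dim 16
#   y = chunk_layer(layer, {"x": x}, chunk_size=8, no_batch_dims=2)
#   y.shape  # -> torch.Size([4, 8, 16]); same result as layer(x), computed 8 rows at a time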
class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes for most modules plateau earlier than this
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        # Binary search for the largest candidate that does not run out of memory
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
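# Hedged usage sketch (added): `fn` is any callable that accepts a `chunk_size=`
# keyword; the tuner probes power-of-two sizes and caches the result per
# argument-shape signature.
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   cs = tuner.tune_chunk_size(fn, (x,), min_chunk_size=4)
#   y = fn(x, chunk_size=cs)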
| 347 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester ):
    def create_and_test_config_common_properties(self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(config , """depth_multiplier""" ) )
class MobileNetVaModelTester:
def __init__( self : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Any=1_3 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : int=3_2 , __UpperCAmelCase : Tuple=0.25 , __UpperCAmelCase : Optional[int]=8 , __UpperCAmelCase : Any=8 , __UpperCAmelCase : str=6 , __UpperCAmelCase : Optional[int]=3_2 , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Any="relu6" , __UpperCAmelCase : str=1_2_8_0 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : Any=0.02 , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Dict=1_0 , __UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = depth_multiplier
SCREAMING_SNAKE_CASE__ = depth_divisible_by
SCREAMING_SNAKE_CASE__ = min_depth
SCREAMING_SNAKE_CASE__ = expand_ratio
SCREAMING_SNAKE_CASE__ = tf_padding
SCREAMING_SNAKE_CASE__ = output_stride
SCREAMING_SNAKE_CASE__ = first_layer_is_expansion
SCREAMING_SNAKE_CASE__ = finegrained_output
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
SCREAMING_SNAKE_CASE__ = classifier_dropout_prob
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = scope
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any ) -> str:
SCREAMING_SNAKE_CASE__ = MobileNetVaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
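        # Worked shape check (added): with the tester defaults (batch_size=13,
        # image_size=32, output_stride=8), the spatial dims above are 32 // 8 = 4,
        # so the expected last_hidden_state shape is (13, last_hidden_size, 4, 4).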
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str ) -> str:
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = MobileNetVaForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = MobileNetVaForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
SCREAMING_SNAKE_CASE__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileNetVaModel,
            'image-classification': MobileNetVaForImageClassification,
            'image-segmentation': MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
def check_hidden_states_output(__UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = outputs.hidden_states
SCREAMING_SNAKE_CASE__ = 1_6
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = MobileNetVaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
SCREAMING_SNAKE_CASE__ = model.to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
[[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
[[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
| 713 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 616 | 0 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
| 87 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = 'openai-gpt'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12,
        afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5,
        initializer_range=0.02, summary_type="cls_index", summary_use_proj=True,
        summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
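# Hedged sketch (added): the attribute_map above exposes both naming schemes on
# the same config object, e.g.
#   cfg = OpenAIGPTConfig(n_embd=256, n_layer=4)
#   assert cfg.hidden_size == 256 and cfg.num_hidden_layers == 4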
| 319 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = """laion/clap-htsat-unfused"""
UpperCamelCase__ = tempfile.mkdtemp()
def A ( self : Optional[int] , **lowercase : List[str] ) -> Optional[int]:
'''simple docstring'''
return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCamelCase__ )
def A ( self : Dict , **lowercase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCamelCase__ )
def A ( self : Tuple ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = ClapProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCamelCase__ )
def A ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase__ = self.get_feature_extractor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
UpperCamelCase__ = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCamelCase__ )
def A ( self : List[str] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = ClapProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
UpperCamelCase__ = floats_list((3, 1_0_0_0) )
UpperCamelCase__ = feature_extractor(UpperCamelCase__ , return_tensors="""np""" )
UpperCamelCase__ = processor(audios=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A ( self : List[str] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = ClapProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
UpperCamelCase__ = """This is a test string"""
UpperCamelCase__ = processor(text=UpperCamelCase__ )
UpperCamelCase__ = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = ClapProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
UpperCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ = processor.batch_decode(UpperCamelCase__ )
UpperCamelCase__ = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : List[str] ) -> str:
'''simple docstring'''
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = ClapProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
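    # Pattern note (added, hedged): the processor fans `text=` out to the tokenizer
    # and `audios=` out to the feature extractor, so a combined call such as
    #   processor(text=["a dog barking"], audios=floats_list((1, 1_000)), return_tensors="np")
    # returns the union of both encodings.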
| 711 |
"""Prime-number helpers: a trial-division primality test and a next-prime
search that can walk upward or, with desc=True, downward."""
import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return next_prime(value + 1, **kwargs)
    return value
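
if __name__ == "__main__":
    # Hand-checkable examples (added): counting up from 14 the next prime is 17;
    # counting down (desc=True) it is 13.
    assert is_prime(13) and not is_prime(15)
    assert next_prime(14) == 17
    assert next_prime(14, desc=True) == 13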
| 265 | 0 |
def join(separator: str, separated: list[str]) -> str:
    """Join a list of strings with the given separator, stripping the separator
    from the ends of the result."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
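# Example behavior (added):
#   join("#", ["a", "b", "c"])        -> 'a#b#c'
#   join(" ", ["You", "are", "cool"]) -> 'You are cool'
#   join("#", ["a", 1])               -> raises Exception (non-string item)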
| 188 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)
    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)
    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
    def test_tokenizer_class_from_name(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
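

# Hedged standalone sketch (illustrative only, not part of the test suite;
# needs network access) of the core dispatch behavior exercised above:
#
#     from transformers import AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("bert-base-cased")
#     type(tok).__name__            # 'BertTokenizerFast' when `tokenizers` is installed
#     tok.tokenize("Hello, world!")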
| 188 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    '''Fast (Rust-backed) SqueezeBERT tokenizer.'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the saved state disagrees with the
        # arguments passed in.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
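

# Hedged usage sketch (added for illustration, not part of the original
# module; requires network access to the Hugging Face Hub):
#
#     from transformers import SqueezeBertTokenizerFast
#     tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#     encoded = tokenizer("How are you?", "Fine, thanks!")
#     encoded["token_type_ids"]  # 0s for the first segment, 1s for the second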
| 389 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak the fairseq RoBERTa weights into our BERT-style structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
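
# Hedged CLI sketch (added for illustration; the script name is assumed from
# the converter it implements, and the paths are placeholders):
#
#     python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#         --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#         --pytorch_dump_folder_path ./xlm-roberta-xl-converted \
#         [--classification_head]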
| 389 | 1 |
"""simple docstring"""
A = [0, 2, 4, 6, 8]
A = [1, 3, 5, 7, 9]
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: int , lowerCamelCase_: int , lowerCamelCase_: list[int] , lowerCamelCase_: int ):
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 1_0
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
snake_case : Any = 0
for digit in range(1_0 ):
snake_case : Union[str, Any] = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 1_0 , lowerCamelCase_ , lowerCamelCase_ )
return result
snake_case : Dict = 0
for digita in range(1_0 ):
snake_case : Tuple = digita
if (remainder + digita) % 2 == 0:
snake_case : Union[str, Any] = ODD_DIGITS
else:
snake_case : Optional[Any] = EVEN_DIGITS
for digita in other_parity_digits:
snake_case : str = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 1_0 , lowerCamelCase_ , lowerCamelCase_ , )
return result
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: int = 9 ):
"""simple docstring"""
snake_case : List[Any] = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(lowerCamelCase_ , 0 , [0] * length , lowerCamelCase_ )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
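    # For the default bound of 10**9 this is expected to print
    # `solution() = 608720`, the published Project Euler 145 answer
    # (hedged note added for illustration).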
| 449 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
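

# Hedged standalone sketch (added for illustration; downloads weights, so run
# it outside the test suite) of the classification path the slow tests cover:
#
#     from transformers import BeitImageProcessor, BeitForImageClassification
#     processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
#     model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])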
| 449 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny epsilon keeps the op numerically stable under XLA compilation.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
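

# Hedged usage sketch (added for illustration; assumes TensorFlow is
# installed, and is not part of the original module):
#
#     x = tf.random.uniform((2, 3, 4))
#     shape_list(x)                   # -> [2, 3, 4]
#     stable_softmax(x, axis=-1)      # softmax with a tiny epsilon for XLA
#     flatten(x, start_dim=1).shape   # -> (2, 12), like torch.flatten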
| 538 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 538 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    '''Configuration for the Audio Spectrogram Transformer (AST) model.'''

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
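

# Hedged usage sketch (added for illustration, not part of the original
# module): override only the spectrogram-specific fields.
#
#     from transformers import ASTConfig
#     config = ASTConfig(max_length=512, num_mel_bins=64)
#     config.hidden_size, config.max_length, config.num_mel_bins  # (768, 512, 64)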
| 87 |
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
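    # Expected output (n * (2n - 1) for n = 0 .. length - 1):
    #   [0, 1, 6, 15, 28]
    #   [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]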
| 140 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the frequency (gain) response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
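

if __name__ == "__main__":
    # Hedged demo (added for illustration): any object with a
    # `process(sample) -> float` method satisfies `FilterType`; an identity
    # filter passes the impulse through unchanged, giving a flat 0 dB response.
    class IdentityFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(IdentityFilter(), samplerate=48_000)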
| 181 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def _lowercase ( SCREAMING_SNAKE_CASE_ : callable = None , SCREAMING_SNAKE_CASE_ : int = 128 ):
"""simple docstring"""
if function is None:
return functools.partial(SCREAMING_SNAKE_CASE_ , starting_batch_size=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = starting_batch_size
def decorator(*SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Tuple ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
UpperCamelCase = list(inspect.signature(SCREAMING_SNAKE_CASE_ ).parameters.keys() )
# Guard against user error
if len(SCREAMING_SNAKE_CASE_ ) < (len(SCREAMING_SNAKE_CASE_ ) + 1):
UpperCamelCase = """, """.join([f'{arg}={value}' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'Batch size was passed into `{function.__name__}` as the first argument when called.'
f'Remove this as the decorator already does so: `{function.__name__}({arg_str})`' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
except Exception as e:
if should_reduce_batch_size(SCREAMING_SNAKE_CASE_ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
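
# Illustrative usage of the decorator above (a sketch; `model` and `dataloader`
# are hypothetical stand-ins for real training objects):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size, model, dataloader):
#       ...  # training loop that uses `batch_size`
#
#   train(model, dataloader)  # `batch_size` is injected by the decorator and
#                             # halved automatically whenever an OOM is caught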
| 181 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) -> List[Any]:
__magic_name__ : List[Any] = tempfile.mkdtemp()
# fmt: off
__magic_name__ : int = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__magic_name__ : List[str] = dict(zip(A_ , range(len(A_ ) ) ) )
__magic_name__ : Any = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__magic_name__ : Optional[Any] = {"unk_token": "<unk>"}
__magic_name__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__magic_name__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(A_ ) )
__magic_name__ : Optional[int] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__magic_name__ : Union[str, Any] = os.path.join(self.tmpdirname , A_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(A_ , A_ )
def __lowerCAmelCase ( self : int , **_A : int ) -> List[str]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **A_ )
def __lowerCAmelCase ( self : List[Any] , **_A : List[Any] ) -> List[str]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **A_ )
def __lowerCAmelCase ( self : Union[str, Any] , **_A : Dict ) -> Optional[Any]:
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )
def __lowerCAmelCase ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
        __magic_name__ : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__magic_name__ : Optional[Any] = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
__magic_name__ : Dict = self.get_tokenizer()
__magic_name__ : Optional[Any] = self.get_rust_tokenizer()
__magic_name__ : Tuple = self.get_image_processor()
__magic_name__ : Any = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
processor_slow.save_pretrained(self.tmpdirname )
__magic_name__ : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
__magic_name__ : Any = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
processor_fast.save_pretrained(self.tmpdirname )
__magic_name__ : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A_ )
self.assertIsInstance(processor_fast.tokenizer , A_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A_ )
self.assertIsInstance(processor_fast.image_processor , A_ )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__magic_name__ : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__magic_name__ : List[str] = self.get_image_processor(do_normalize=A_ )
__magic_name__ : str = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def __lowerCAmelCase ( self : str ) -> int:
__magic_name__ : Dict = self.get_image_processor()
__magic_name__ : List[Any] = self.get_tokenizer()
__magic_name__ : List[Any] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
__magic_name__ : List[str] = self.prepare_image_inputs()
__magic_name__ : Dict = image_processor(A_ , return_tensors='np' )
__magic_name__ : Optional[Any] = processor(images=A_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self : List[str] ) -> Dict:
__magic_name__ : Tuple = self.get_image_processor()
__magic_name__ : Optional[int] = self.get_tokenizer()
__magic_name__ : int = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
__magic_name__ : Union[str, Any] = "lower newer"
__magic_name__ : List[str] = processor(text=A_ , return_tensors='np' )
__magic_name__ : Union[str, Any] = tokenizer(A_ , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
__magic_name__ : List[Any] = self.get_image_processor()
__magic_name__ : Optional[int] = self.get_tokenizer()
__magic_name__ : Any = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
__magic_name__ : Optional[int] = "lower newer"
__magic_name__ : List[Any] = self.prepare_image_inputs()
__magic_name__ : Optional[int] = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
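
    # For reference, outside of tests the processor under test is typically
    # used like this (illustrative; the checkpoint name is an example):
    #
    #   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    #   inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="pt")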
def __lowerCAmelCase ( self : Tuple ) -> Any:
__magic_name__ : List[Any] = "google/owlvit-base-patch32"
__magic_name__ : List[Any] = OwlViTProcessor.from_pretrained(A_ )
__magic_name__ : Tuple = ["cat", "nasa badge"]
__magic_name__ : Tuple = processor(text=A_ )
__magic_name__ : int = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
__magic_name__ : Tuple = "google/owlvit-base-patch32"
__magic_name__ : Dict = OwlViTProcessor.from_pretrained(A_ )
__magic_name__ : Optional[Any] = [["cat", "nasa badge"], ["person"]]
__magic_name__ : Optional[int] = processor(text=A_ )
__magic_name__ : List[Any] = 16
__magic_name__ : Dict = len(A_ )
__magic_name__ : Optional[int] = max([len(A_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def __lowerCAmelCase ( self : int ) -> Union[str, Any]:
__magic_name__ : List[str] = "google/owlvit-base-patch32"
__magic_name__ : List[Any] = OwlViTProcessor.from_pretrained(A_ )
__magic_name__ : int = ["cat", "nasa badge"]
__magic_name__ : Union[str, Any] = processor(text=A_ )
__magic_name__ : int = 16
__magic_name__ : Optional[Any] = inputs["input_ids"]
__magic_name__ : Union[str, Any] = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : str = self.get_image_processor()
__magic_name__ : List[Any] = self.get_tokenizer()
__magic_name__ : List[str] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
__magic_name__ : Optional[Any] = self.prepare_image_inputs()
__magic_name__ : Optional[Any] = self.prepare_image_inputs()
__magic_name__ : List[str] = processor(images=A_ , query_images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
    def test_tokenizer_decode(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 561 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : Any = 3
_UpperCAmelCase : List[Any] = (32, 32)
_UpperCAmelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A_ )
return image
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        _UpperCAmelCase : List[Any] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : int = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(A_ )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
def extract(*A_ , **A_ ):
class a :
def __init__( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = torch.ones([0] )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
self.pixel_values.to(A_ )
return self
return Out()
return extract
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : List[str] = PNDMScheduler(skip_prk_steps=A_ )
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Any = self.dummy_text_encoder
_UpperCAmelCase : Tuple = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_UpperCAmelCase : Dict = 77
_UpperCAmelCase : Optional[int] = self.dummy_image.to(A_ )
_UpperCAmelCase : List[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
        _UpperCAmelCase : Tuple = AltDiffusionImg2ImgPipeline(
unet=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , safety_checker=A_ , feature_extractor=self.dummy_extractor , )
_UpperCAmelCase : str = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A_ )
_UpperCAmelCase : Dict = alt_pipe.to(A_ )
alt_pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : List[str] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[int] = torch.Generator(device=A_ ).manual_seed(0 )
_UpperCAmelCase : Optional[int] = alt_pipe(
[prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=A_ , )
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Tuple = torch.Generator(device=A_ ).manual_seed(0 )
_UpperCAmelCase : Dict = alt_pipe(
[prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=A_ , return_dict=A_ , )[0]
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase : int = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : str = self.dummy_cond_unet
_UpperCAmelCase : Any = PNDMScheduler(skip_prk_steps=A_ )
_UpperCAmelCase : Optional[Any] = self.dummy_vae
_UpperCAmelCase : Any = self.dummy_text_encoder
_UpperCAmelCase : Dict = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_UpperCAmelCase : Any = 77
_UpperCAmelCase : List[Any] = self.dummy_image.to(A_ )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : Optional[Any] = vae.half()
_UpperCAmelCase : int = bert.half()
# make sure here that pndm scheduler skips prk
        _UpperCAmelCase : Optional[int] = AltDiffusionImg2ImgPipeline(
unet=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , safety_checker=A_ , feature_extractor=self.dummy_extractor , )
_UpperCAmelCase : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A_ )
_UpperCAmelCase : List[str] = alt_pipe.to(A_ )
alt_pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : List[str] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : int = alt_pipe(
[prompt] , generator=A_ , num_inference_steps=2 , output_type="np" , image=A_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
_UpperCAmelCase : Dict = init_image.resize((760, 504) )
_UpperCAmelCase : str = "BAAI/AltDiffusion"
        _UpperCAmelCase : str = AltDiffusionImg2ImgPipeline.from_pretrained(
A_ , safety_checker=A_ , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_UpperCAmelCase : Union[str, Any] = "A fantasy landscape, trending on artstation"
_UpperCAmelCase : int = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , generator=A_ , output_type="np" , )
_UpperCAmelCase : Union[str, Any] = output.images[0]
_UpperCAmelCase : Optional[int] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_UpperCAmelCase : Optional[int] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
_UpperCAmelCase : Optional[int] = init_image.resize((768, 512) )
_UpperCAmelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
_UpperCAmelCase : int = "BAAI/AltDiffusion"
        _UpperCAmelCase : Optional[int] = AltDiffusionImg2ImgPipeline.from_pretrained(
A_ , safety_checker=A_ , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_UpperCAmelCase : int = "A fantasy landscape, trending on artstation"
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : int = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , generator=A_ , output_type="np" , )
_UpperCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
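
# For reference, the pipeline exercised by these tests is used like this
# outside of a test harness (illustrative sketch; downloads the
# BAAI/AltDiffusion weights and is best run on a GPU):
#
#   pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
#   pipe = pipe.to("cuda")
#   image = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75).images[0]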
| 300 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
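
# The structure above keeps `import transformers.models.funnel` cheap: the
# `_LazyModule` registered in `sys.modules` resolves a submodule only on first
# attribute access, so for example
#
#   from transformers.models.funnel import FunnelConfig
#
# does not import the torch- or TF-dependent modeling files.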
| 709 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Pipeline that predicts a caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt

        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
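
# Illustrative usage of the pipeline defined above (the checkpoint name is an
# example; any vision-to-text model works):
#
#   from transformers import pipeline
#
#   captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
#   captioner("cat.png", max_new_tokens=20)
#   # -> [{'generated_text': '...'}]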
| 110 | 0 |
"""Compute Catalan numbers."""


def catalan(number: int) -> int:
    """
    Return the `number`-th Catalan number (1-indexed), using the recurrence
    C(n) = C(n - 1) * (4n - 2) / (n + 1).

    >>> catalan(1)
    1
    >>> catalan(3)
    2
    >>> catalan(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
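
# For reference, the same values follow from the closed form
# C(n) = binom(2n, n) / (n + 1): e.g. catalan(5) == comb(8, 4) // 5 == 14.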
| 111 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """De-duplicate entries of a model-doc section and sort them by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
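
# The script expects `_toctree.yml` to look roughly like this (simplified
# sketch of the real file):
#
#   - title: API
#     sections:
#       - title: Models
#         sections:
#           - title: Text models
#             sections:
#               - local: model_doc/bert
#                 title: BERT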
| 412 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
__UpperCAmelCase : int = logging.get_logger(__name__)
class lowerCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Optional[Any] , *__snake_case : Any , **__snake_case : Optional[int] ) -> None:
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
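
# The alias keeps old code working while pointing users at the replacement
# (illustrative):
#
#   extractor = SegformerFeatureExtractor()  # works, but emits the warning above
#   # preferred: SegformerImageProcessor()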
| 249 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self : Any , __snake_case : int , __snake_case : Optional[Any]=13 , __snake_case : int=7 , __snake_case : Dict=True , __snake_case : str=True , __snake_case : List[str]=True , __snake_case : int=True , __snake_case : str=99 , __snake_case : Dict=24 , __snake_case : int=2 , __snake_case : Dict=6 , __snake_case : str=37 , __snake_case : str="gelu" , __snake_case : List[str]=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Optional[int]=512 , __snake_case : Any=16 , __snake_case : Optional[int]=2 , __snake_case : List[Any]=0.02 , __snake_case : str=3 , __snake_case : List[Any]=None , __snake_case : Any=1000 , ) -> str:
_a : Dict = parent
_a : Tuple = batch_size
_a : Optional[int] = seq_length
_a : Optional[int] = is_training
_a : Dict = use_input_mask
_a : Optional[Any] = use_token_type_ids
_a : List[Any] = use_labels
_a : List[str] = vocab_size
_a : int = hidden_size
_a : List[str] = num_hidden_layers
_a : Optional[Any] = num_attention_heads
_a : Dict = intermediate_size
_a : str = hidden_act
_a : str = hidden_dropout_prob
_a : Union[str, Any] = attention_probs_dropout_prob
_a : Tuple = max_position_embeddings
_a : List[str] = type_vocab_size
_a : List[Any] = type_sequence_label_size
_a : Optional[int] = initializer_range
_a : str = num_labels
_a : int = scope
_a : Tuple = range_bbox
def snake_case_ ( self : Any ) -> Any:
_a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_a : Any = bbox[i, j, 3]
_a : Any = bbox[i, j, 1]
_a : Optional[int] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_a : int = bbox[i, j, 2]
_a : str = bbox[i, j, 0]
_a : List[Any] = t
_a : Any = None
if self.use_input_mask:
_a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_a : Optional[Any] = None
if self.use_token_type_ids:
_a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Any = None
_a : Union[str, Any] = None
if self.use_labels:
_a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case_ ( self : str ) -> List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def snake_case_ ( self : int , __snake_case : List[str] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Any , __snake_case : int , ) -> Any:
_a : Union[str, Any] = LiltModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : Union[str, Any] = model(__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
_a : List[Any] = model(__snake_case , bbox=__snake_case , token_type_ids=__snake_case )
_a : List[Any] = model(__snake_case , bbox=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self : Dict , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : int , ) -> Tuple:
_a : List[str] = self.num_labels
_a : Optional[Any] = LiltForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : Optional[Any] = model(
__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self : Tuple , __snake_case : int , __snake_case : int , __snake_case : List[str] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Optional[int] , ) -> Optional[int]:
_a : List[str] = LiltForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : int = model(
__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self : Any ) -> Optional[int]:
_a : List[Any] = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) : Optional[Any] = config_and_inputs
_a : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase : Optional[Any] = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase : Any = False
UpperCAmelCase : Optional[Any] = False
def snake_case_ ( self : Optional[int] , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Dict , __snake_case : Optional[int] ) -> List[str]:
return True
def snake_case_ ( self : int ) -> Dict:
_a : Union[str, Any] = LiltModelTester(self )
_a : List[str] = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def snake_case_ ( self : Dict ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def snake_case_ ( self : str ) -> Tuple:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : str ) -> str:
_a : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a : Any = type
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : Optional[Any] ) -> List[Any]:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
def snake_case_ ( self : List[Any] ) -> List[Any]:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
@slow
def snake_case_ ( self : Dict ) -> Optional[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : List[str] = LiltModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
@slow
class lowerCamelCase ( unittest.TestCase ):
def snake_case_ ( self : Optional[Any] ) -> str:
_a : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__snake_case )
_a : List[str] = torch.tensor([[1, 2]] , device=__snake_case )
_a : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__snake_case )
# forward pass
with torch.no_grad():
_a : List[Any] = model(input_ids=__snake_case , bbox=__snake_case )
_a : Optional[Any] = torch.Size([1, 2, 768] )
_a : Optional[Any] = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__snake_case , )
self.assertTrue(outputs.last_hidden_state.shape , __snake_case )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __snake_case , atol=1E-3 ) )
| 249 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Load a list of `InputExample`s into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
def __len__( self ):
"""simple docstring"""
return len(self.features )
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        # Use the cross entropy ignore index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = -100
        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__magic_name__ :Any = tf.data.Dataset.from_generator(
__lowerCAmelCase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__magic_name__ :Optional[int] = tf.data.Dataset.from_generator(
__lowerCAmelCase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset
def __len__( self ):
"""simple docstring"""
return len(self.features )
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
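
    # Illustrative construction of the datasets above (a sketch; `NER` is a
    # hypothetical `TokenClassificationTask` subclass implementing
    # `read_examples_from_file` and `get_labels`):
    #
    #   dataset = TokenClassificationDataset(
    #       token_classification_task=NER(), data_dir="data/", tokenizer=tokenizer,
    #       labels=["O", "B-PER", "I-PER"], model_type="bert", max_seq_length=128,
    #       mode=Split.train,
    #   )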
| 0 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 119 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : List[str] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
lowerCAmelCase__ : List[str] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32)  # "J'aime le camembert !"
lowerCAmelCase__ : str = model(__A )["""last_hidden_state"""]
lowerCAmelCase__ : List[str] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape ,__A )
# compare the actual values for a slice.
lowerCAmelCase__ : List[str] = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 715 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
lowerCAmelCase__ : Tuple = mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
lowerCAmelCase__ : List[str] = max(
mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , j - wt[i - 1] ) + val[i - 1] , )
lowerCAmelCase__ : Union[str, Any] = val
return f[i][j]
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
lowerCAmelCase__ : Tuple = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
lowerCAmelCase__ : Union[str, Any] = dp[i - 1][w_]
return dp[n][w_], dp
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if not (isinstance(UpperCamelCase , (list, tuple) ) and isinstance(UpperCamelCase , (list, tuple) )):
raise ValueError(
"""Both the weights and values vectors must be either lists or tuples""" )
lowerCAmelCase__ : Tuple = len(UpperCamelCase )
if num_items != len(UpperCamelCase ):
lowerCAmelCase__ : Optional[int] = (
"""The number of weights must be the same as the number of values.\n"""
f"""But got {num_items} weights and {len(UpperCamelCase )} values"""
)
raise ValueError(UpperCamelCase )
for i in range(UpperCamelCase ):
if not isinstance(wt[i] , UpperCamelCase ):
lowerCAmelCase__ : Union[str, Any] = (
"""All weights must be integers but got weight of """
f"""type {type(wt[i] )} at index {i}"""
)
raise TypeError(UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : str = knapsack(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : set = set()
_construct_solution(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
return optimal_val, example_optional_set
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(UpperCamelCase , UpperCamelCase , i - 1 , UpperCamelCase , UpperCamelCase )
else:
optimal_set.add(UpperCamelCase )
_construct_solution(UpperCamelCase , UpperCamelCase , i - 1 , j - wt[i - 1] , UpperCamelCase )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
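    # Quick sanity check (editor's addition, not part of the original module): the
    # bottom-up solver should agree with the optimum asserted above.
    assert knapsack(6, [4, 3, 2, 3], [3, 2, 4, 4], 4)[0] == 8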
| 160 | 0 |
def solution(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 33 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=is_inner_value, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
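

# Minimal usage sketch for token2json (editor's addition; `processor` and the
# explicit `added_vocab=[]` are hypothetical stand-ins, not part of the original file):
# processor.token2json("<s_menu><s_nm>Pizza</s_nm></s_menu>", added_vocab=[])
# # -> {"menu": {"nm": "Pizza"}}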
| 33 | 1 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """
    Returns the number of different combinations of k length which can
    be made from n values, where n >= k.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
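    # Worked example (editor's addition): choosing 2 items out of 5 gives
    # 5! / (2! * 3!) = 10 distinct pairs.
    assert combinations(5, 2) == 10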
| 702 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ], dtype=tf.float32, )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
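
        # Note (editor's addition): @slow integration tests like this one are skipped
        # by default in the transformers test suite and only run when the RUN_SLOW
        # environment variable is set, e.g.:
        # RUN_SLOW=1 pytest tests/models/xlm_roberta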
| 79 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}, )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome." )
UpperCamelCase = import_module("tasks" )
try:
UpperCamelCase = getattr(_snake_case , model_args.task_type )
UpperCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _snake_case )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
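

# Illustrative invocation (editor's addition; the data/output paths and model name
# are placeholders, not values from the original script):
# python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#     --labels ./conll2003/labels.txt --output_dir ./ner-out --max_seq_length 128 \
#     --do_train --do_eval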
| 554 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.Array) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = self.block_out_channels
__magic_name__ : Tuple = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__magic_name__ : str = self.num_attention_heads or self.attention_head_dim
# input
__magic_name__ : Tuple = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__magic_name__ : Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__magic_name__ : str = FlaxTimestepEmbedding(_a , dtype=self.dtype )
__magic_name__ : Tuple = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__magic_name__ : Tuple = self.only_cross_attention
if isinstance(_a , _a ):
__magic_name__ : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_a , _a ):
__magic_name__ : Union[str, Any] = (num_attention_heads,) * len(self.down_block_types )
# down
__magic_name__ : List[Any] = []
__magic_name__ : Union[str, Any] = []
__magic_name__ : Any = block_out_channels[0]
__magic_name__ : str = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
for i, down_block_type in enumerate(self.down_block_types ):
__magic_name__ : Optional[int] = output_channel
__magic_name__ : int = block_out_channels[i]
__magic_name__ : List[str] = i == len(_a ) - 1
if down_block_type == "CrossAttnDownBlock2D":
                __magic_name__ : Union[str, Any] = FlaxCrossAttnDownBlock2D(
in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
                __magic_name__ : List[Any] = FlaxDownBlock2D(
in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_a )
for _ in range(self.layers_per_block ):
__magic_name__ : List[Any] = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
if not is_final_block:
__magic_name__ : str = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
__magic_name__ : Any = down_blocks
__magic_name__ : Any = controlnet_down_blocks
# mid
__magic_name__ : Optional[int] = block_out_channels[-1]
        __magic_name__ : List[str] = FlaxUNetMidBlock2DCrossAttn(
in_channels=_a , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__magic_name__ : Optional[int] = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False, ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
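

# Minimal smoke test (editor's addition; a sketch only — it assumes JAX/Flax and the
# diffusers Flax blocks are importable in this environment):
# import jax
# controlnet = FlaxControlNetModel(sample_size=32)
# params = controlnet.init_weights(jax.random.PRNGKey(0))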
| 124 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, num_labels=3, num_choices=4, scope=None, base=False, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 530 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
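
    # Small illustrative check (editor's addition): an even ksize is bumped to the
    # next odd value, so requesting a 10x10 mask yields an 11x11 kernel.
    assert gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape == (11, 11)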
| 530 | 1 |
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them in a grid
    (whose height depends on the key) in a zigzag formation and reading it
    left to right.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key and fills it in with the characters
    of the input string, then reads it in a zigzag formation.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key"""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
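

# Round-trip sanity check (editor's addition): decrypt inverts encrypt for any key
# that is valid for the message length.
# message = "WE ARE DISCOVERED FLEE AT ONCE"
# assert decrypt(encrypt(message, 4), 4) == message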
if __name__ == "__main__":
import doctest
doctest.testmod() | 448 |
'''simple docstring'''
from timeit import timeit
test_data = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """
    Return True if s is a palindrome otherwise return False.
    """
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    """
    Return True if s is a palindrome otherwise return False.
    """
    end = len(s) // 2
    n = len(s)

    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    """
    Return True if s is a palindrome otherwise return False.
    """
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    """
    Return True if s is a palindrome otherwise return False.
    """
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"""{key:21} {value}""")
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""") | 448 | 1 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
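

# Usage note (editor's addition): this builder backs the packaged "json" loader,
# e.g. (the file name below is a placeholder):
# from datasets import load_dataset
# ds = load_dataset("json", data_files="data.jsonl")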
| 702 |
"""simple docstring"""
def remove_digit(num: int) -> int:
    """
    Returns the biggest possible result that can be achieved
    by removing one digit from the given number.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('doctest').testmod()
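
# Worked example (editor's addition): dropping one digit from 152 can leave
# 52, 12 or 15, and the function returns the largest, 52.
# assert remove_digit(152) == 52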
| 397 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
"""simple docstring"""
__UpperCAmelCase : List[str] = ViTConfig(image_size=384 , qkv_bias=UpperCamelCase )
__UpperCAmelCase : Any = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase : Optional[int] = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase : Any = 1024
__UpperCAmelCase : int = 4096
__UpperCAmelCase : Tuple = 24
__UpperCAmelCase : Any = 16
__UpperCAmelCase : Union[str, Any] = 1024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = "relu"
__UpperCAmelCase : Any = 1024
__UpperCAmelCase : str = True
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = False
# load HuggingFace model
__UpperCAmelCase : Optional[Any] = ViTModel(UpperCamelCase , add_pooling_layer=UpperCamelCase )
__UpperCAmelCase : str = TrOCRForCausalLM(UpperCamelCase )
__UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase : Optional[int] = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="cpu" , check_hash=UpperCamelCase )["model"]
__UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase : Optional[int] = state_dict.pop(UpperCamelCase )
if key.startswith("decoder" ) and "output_projection" not in key:
__UpperCAmelCase : Any = val
else:
__UpperCAmelCase : int = val
# load state dict
model.load_state_dict(UpperCamelCase )
# Check outputs on an image
__UpperCAmelCase : List[str] = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase : Union[str, Any] = RobertaTokenizer.from_pretrained("roberta-large" )
__UpperCAmelCase : Dict = TrOCRProcessor(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Optional[Any] = processor(images=prepare_img(UpperCamelCase ) , return_tensors="pt" ).pixel_values
# verify logits
__UpperCAmelCase : Dict = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase : Optional[Any] = model(pixel_values=UpperCamelCase , decoder_input_ids=UpperCamelCase )
__UpperCAmelCase : List[Any] = outputs.logits
__UpperCAmelCase : List[str] = torch.Size([1, 1, 5_0265] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase : str = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase : int = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase : str = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , UpperCamelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
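# Example invocation (illustrative; the script file name and output folder are
# assumptions, not taken from the original file):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten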
| 77 | from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 520 | 0 |
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
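# Quick sanity check (illustrative, not part of the original module): Malus's law
# gives I = I0 * cos^2(theta), so a 60-degree analyzer passes a quarter of the light.
#
#   >>> round(malus_law(100.0, 60), 2)
#   25.0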
| 427 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 427 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split('.*.')
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed') and name.endswith('embed_avg'):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split('.')[-2]
                    mapped_key = mapped_key.replace('*', layer_index)
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''')
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'''Unknown model name: {model_name}''')

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
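# Example invocation (illustrative; the script file name and the local checkpoint
# path are assumptions, not taken from the original file):
#
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz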
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 30 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter operating on float samples."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
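# Minimal usage sketch (not part of the original file): a 2nd-order "identity" filter
# (a = b = [1, 0, 0]) must pass each sample through unchanged, a quick self-test.
#
#   filt = IIRFilter(2)
#   filt.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
#   assert filt.process(0.5) == 0.5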
| 694 | 0 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = '0094702343221'
print(is_sri_lankan_phone_number(phone))
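# Illustrative checks (not in the original file): the pattern accepts the +94
# country-code form and rejects numbers whose subscriber part lacks an allowed 7x prefix.
#
#   >>> is_sri_lankan_phone_number("+94773283048")
#   True
#   >>> is_sri_lankan_phone_number("0912343221")
#   False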
| 709 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.array) -> np.array:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.array) -> np.array:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.array, kernel: np.array) -> np.array:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image (the slice offset was lost in this copy; restored
    # from the reference implementation of this dilation routine)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
    # read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
| 524 | 0 |
def partition(m: int) -> int:
    """Count integer partitions of m with dynamic programming."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
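# Illustrative value (not in the original file): with this recurrence, partition(5)
# evaluates to 7, the number of integer partitions of 5
# (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).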
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.') | 86 |
"""simple docstring"""
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 174 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
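# Usage sketch (illustrative, not part of this module): this builder is what backs
# `load_dataset("pandas", ...)` for pickled DataFrames, e.g.
#
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files={"train": "frames.pkl"})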
| 703 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
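# Illustrative values (not in the original file): 6 = 2 * 3 is square-free with an even
# number of prime factors, 2 has an odd count, and 4 = 2**2 is not square-free.
#
#   >>> mobius(6), mobius(2), mobius(4)
#   (1, -1, 0)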
| 171 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                }, step=epoch, )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs and relevant project information", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
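# Typical invocations (illustrative; the script file name is an assumption):
#
#   python tracking_example.py --with_tracking
#   accelerate launch tracking_example.py --mixed_precision fp16 --with_tracking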
| 154 |
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the twentieth century."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
| 154 | 1 |
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
    bounds = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
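# Usage sketch (not part of the original file): the deterministic witness sets above
# cover every n below roughly 3.3e24, so ordinary inputs need no `allow_probable` flag.
#
#   >>> miller_rabin(97), miller_rabin(561)
#   (True, False)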
def test_miller_rabin() -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 709 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system A * x = b by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # build the augmented matrix [A | b]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Fit the unique polynomial of degree len(y_points) - 1 through the sampled points."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
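# Illustrative check (not in the original file): fitting a quadratic through the first
# three cubes gives 6x^2 - 11x + 6, whose value at x = 4 is 58 rather than 64, which is
# exactly the kind of "first incorrect term" that solution() accumulates below.
#
#   >>> interpolate([1, 8, 27])(4)
#   58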
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F"{solution() = }")
| 301 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )

    @classmethod
    def pip_install(cls):
        return f'''`pip install {cls.pip_package or cls.name}`'''


class OptunaBackend(HyperParamSearchBackendBase):
    name = 'optuna'

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = 'ray'
    pip_package = '\'ray[tune]\''

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = 'sigopt'

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = 'wandb'

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f'''{len(available_backends)} hyperparameter search backends available. Using {name} as the default.''' )
        return name
    raise RuntimeError(
        '''No hyperparameter search backend available.\n'''
        + '''\n'''.join(
            f''' - To install {backend.name} run {backend.pip_install()}'''
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
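# How these backends are usually reached in practice (illustrative; assumes a
# configured `transformers.Trainer` instance named `trainer`):
#
#   best_run = trainer.hyperparameter_search(direction="minimize", backend="optuna", n_trials=10)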
| 2 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 517 | 0 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                # positional arguments restored from the reference implementation:
                # (dim, num_attention_heads, attention_head_dim)
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
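# Construction sketch (illustrative; the default CLIP vision config and the input
# shape are assumptions, not taken from the original file):
#
#   from transformers import CLIPVisionConfig
#   encoder = PaintByExampleImageEncoder(CLIPVisionConfig())
#   latents = encoder(torch.randn(1, 3, 224, 224))  # shape (1, 1, proj_size)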
| 225 | import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip('Image segmentation not implemented in TF')
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline('mask-generation', model='facebook/sam-vit-huge')

        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg', points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_80, 6_40)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (4_80, 6_40)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (4_80, 6_40)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (4_80, 6_40)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (4_80, 6_40)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (4_80, 6_40)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (4_80, 6_40)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (4_80, 6_40)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_80, 6_40)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (4_80, 6_40)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (4_80, 6_40)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_80, 6_40)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (4_80, 6_40)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (4_80, 6_40)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_80, 6_40)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_80, 6_40)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (4_80, 6_40)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (4_80, 6_40)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_80, 6_40)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (4_80, 6_40)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (4_80, 6_40)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_80, 6_40)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_80, 6_40)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_80, 6_40)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_80, 6_40)}, 'scores': 0.8_8_7_1}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation', model=model_id)

        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg', pred_iou_thresh=1, points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0_0_5_3},
] , )
| 225 | 1 |
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'{solution() = }')
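
    # Cross-check (illustrative): a compact reformulation of the same scan using
    # math.prod over each 13-digit window should agree with solution().
    import math

    assert solution() == max(math.prod(int(d) for d in N[i : i + 13]) for i in range(len(N) - 12))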
| 673 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
"""simple docstring"""
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='test-model-flax' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub('test-model-flax', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")

        # Reset repo
        delete_repo(token=self._token, repo_id='test-model-flax')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id='test-model-flax', push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub('valid_org/test-model-flax-org', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-model-flax-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id='valid_org/test-model-flax-org', push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")
def check_models_equal(model_1, model_2) -> bool:
    """Return True if every parameter tensor of the two models matches within 1e-4."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
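
# Illustrative usage (a sketch, not part of the test suite): `check_models_equal`
# treats two Flax models as equal when every parameter tensor differs by at most
# 1e-4 in summed absolute value, e.g. after a save/load round trip:
#
#     config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
#     model_a = FlaxBertModel(config)
#     with tempfile.TemporaryDirectory() as tmp_dir:
#         model_a.save_pretrained(tmp_dir)
#         model_b = FlaxBertModel.from_pretrained(tmp_dir)
#     assert check_models_equal(model_a, model_b)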
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
"""simple docstring"""
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)
        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)
        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size='10KB')
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-subfolder'
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 383 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False}, )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_tokenizer_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('''Does not support attention outputs''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : Any ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : Any ) -> str:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def UpperCAmelCase__ ( self : str ) -> str:
'''simple docstring'''
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''ESMFold only has one output format.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def UpperCAmelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def UpperCAmelCase__ ( self : str ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def UpperCAmelCase__ ( self : int ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : Any ) -> int:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)['positions']
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1E-4))
| 106 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
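
# Illustrative usage (a sketch): the defaults above mirror the base checkpoints
# listed in the archive map; XPath-specific sizes such as `max_depth` can be
# overridden at construction time.
#
#     config = MarkupLMConfig(max_depth=64)
#     assert config.xpath_unit_hidden_size == 32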
| 106 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1E-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    """Stack the pixel values of a list of examples into one batch tensor."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ])

    def preprocess_images(examples):
        """Apply the MAE train-time transforms to a batch of images."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
# Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
# Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 631 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True, )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True, )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1E-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1E-5))
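
# Why the two runs line up (illustrative): DDPM and DDIM share the same forward
# noising process, so `add_noise` produces identical batches when the beta
# schedules match. A hypothetical direct check of that assumption:
#
#     ddpm = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#     ddim = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#     assert torch.allclose(ddpm.alphas_cumprod, ddim.alphas_cumprod)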
| 631 | 1 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(
    observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space, states_space) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"""{var_name} must be a list"""
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"""{var_name} must be a list of strings"""
                raise ValueError(msg)
def _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"""{var_name} must be a dict"""
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"""{var_name} all keys must be strings"""
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
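
    # Worked example (illustrative): the classic healthy/sick hidden Markov
    # model. With these parameters the most likely hidden sequence for the
    # observations ["normal", "cold", "dizzy"] is ["healthy", "healthy", "sick"].
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "sick"]
    start_p = {"healthy": 0.6, "sick": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))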
| 600 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
SPIECE_UNDERLINE = """▁"""
class BarthezTokenizer(PreTrainedTokenizer):
    """Tokenizer for BARThez models, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        self.fairseq_offset = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 600 | 1 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def snake_case__ ( ) ->str:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(_lowerCamelCase ):
requests.request("GET", "https://huggingface.co" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("GET", "https://huggingface.co", timeout=1.0 )
@pytest.mark.integration
def snake_case__ ( ) ->Optional[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("GET", "https://huggingface.co" )
def snake_case__ ( ) ->Any:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
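
# Usage pattern (illustrative): `offline(...)` patches the network entry points,
# so code under test fails fast instead of hanging on a dead connection, e.g.:
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         try:
#             http_head("https://huggingface.co")
#         except requests.exceptions.ConnectionError:
#             print("simulated outage detected")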
| 575 |
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary representation as a string."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
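
    # Sanity checks (illustrative): the result matches Python's builtin bin().
    for value in (0, 1, 5, -5, 1024):
        assert decimal_to_binary(value) == bin(value)
    print(decimal_to_binary(200))  # 0b11001000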
| 575 | 1 |
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Current weather for a location; the parameter names double as query keys."""
    return requests.get(URL_BASE + "weather", params=locals()).json()
def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Multi-day forecast for a location."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()
def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Combined current/forecast data for a latitude/longitude pair."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 704 |
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    """Apply the rectified linear unit elementwise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
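    # relu is elementwise, so it also works on higher-dimensional arrays
    # (illustrative; the annotation above only advertises 1-D lists):
    print(relu(np.array([[-2.0, 0.5], [3.0, -0.1]])))  # --> [[0. 0.5] [3. 0. ]]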
| 166 | 0 |
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = """Morse code here!"""
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main() | 45 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    verbose_logging: Optional[bool] = field(
        default=False, metadata={'help': 'Whether to log verbose messages or not.'}, )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={'help': 'Maximum temperature for gumbel softmax.'})
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={'help': 'Minimum temperature for gumbel softmax.'})
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={'help': 'Decay of gumbel temperature during training.'})
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_split_name: Optional[str] = field(
        default='train', metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        }, )
    validation_split_name: Optional[str] = field(
        default='validation', metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        }, )
    speech_file_column: Optional[str] = field(
        default='file', metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''}, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )
        return batch
class Wav2Vec2PreTrainer(Trainer):
    """Trainer subclass that decays the gumbel softmax temperature after each update step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]):
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']')

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, cache_dir=model_args.cache_dir
        )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )
    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
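

# Sketch of the Gumbel-temperature schedule that `training_step` above applies:
# the temperature decays geometrically with the update step and is floored at
# `min_gumbel_temp`. The constants below are hypothetical, chosen only to show
# the shape of the curve.
def _gumbel_temperature_demo(max_gumbel_temp=2.0, min_gumbel_temp=0.5, gumbel_temp_decay=0.999995):
    for num_update_step in (0, 100_000, 500_000):
        temp = max(max_gumbel_temp * gumbel_temp_decay**num_update_step, min_gumbel_temp)
        print(num_update_step, round(temp, 4))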
| 388 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 10_24,
"""facebook/bart-large""": 10_24,
"""facebook/bart-large-mnli""": 10_24,
"""facebook/bart-large-cnn""": 10_24,
"""facebook/bart-large-xsum""": 10_24,
"""yjernite/bart_eli5""": 10_24,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
                 bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
                 pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
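

# Usage sketch for the fast tokenizer above. The checkpoint name is one of the
# entries from PRETRAINED_VOCAB_FILES_MAP; downloading it requires network access.
def _bart_tokenizer_demo():
    tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
    enc = tok("Hello world", return_tensors="pt")
    # single sequences come back wrapped as <s> ... </s>, per
    # build_inputs_with_special_tokens above
    print(enc["input_ids"])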
| 715 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 10_24,
"""moussaKam/barthez""": 10_24,
"""moussaKam/barthez-orangesum-title""": 10_24,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
                 unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
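

# Sketch of the special-token layouts produced by build_inputs_with_special_tokens
# above, using the fairseq ids hard-coded in __init__ (<s>=0, </s>=2) and
# hypothetical sentencepiece ids for the payload tokens:
def _barthez_layout_demo():
    cls_id, sep_id = 0, 2
    ids_a, ids_b = [10, 11], [12]
    single = [cls_id] + ids_a + [sep_id]  # <s> A </s>
    pair = [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]  # <s> A </s></s> B </s>
    print(single, pair)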
| 233 | 0 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
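

# One-off invocation sketch for a single language pair; the target directory is
# hypothetical, while the loop above writes all four cards under
# model_cards/facebook/:
def _single_card_demo():
    write_model_card(Path("model_cards/facebook/wmt19-en-de"), src_lang="en", tgt_lang="de")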
| 58 |
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
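

# Sketch of how the `compatibles` property above resolves names: `_compatibles`
# holds bare class-name strings that `_get_compatibles` looks up on the
# top-level `diffusers` module. The names below are hypothetical.
def _compatibles_lookup_demo():
    names = ["DDIMScheduler", "NotARealScheduler"]
    library = importlib.import_module("diffusers")
    print([getattr(library, n) for n in names if hasattr(library, n)])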
| 482 | 0 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
a__ = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
a__ = Vector()
def _A ( self ):
'''simple docstring'''
a__ = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(lowerCamelCase ) , """(0,0,0,0,0,1)""" )
def _A ( self ):
'''simple docstring'''
a__ = Vector([1, 2, 3, 4] )
self.assertEqual(len(lowerCamelCase ) , 4 )
def _A ( self ):
'''simple docstring'''
a__ = Vector([1, 2] )
a__ = Vector([1, 2, 3, 4, 5] )
a__ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
a__ = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def _A ( self ):
'''simple docstring'''
a__ = Vector([1, 2, 3] )
a__ = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def _A ( self ):
'''simple docstring'''
a__ = Vector([1, 2, 3] )
a__ = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def _A ( self ):
'''simple docstring'''
a__ = Vector([1, 2, 3] )
a__ = Vector([2, -1, 4] ) # for test of dot product
a__ = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def _A ( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def _A ( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def _A ( self ):
'''simple docstring'''
a__ = Vector([1, 2, 3] )
a__ = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , lowerCamelCase , lowerCamelCase ) ) , """(3,4,7)""" )
def _A ( self ):
'''simple docstring'''
a__ = Vector([1, 0, 0, 0, 0, 0] )
a__ = x.copy()
self.assertEqual(str(lowerCamelCase ) , str(lowerCamelCase ) )
def _A ( self ):
'''simple docstring'''
a__ = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(lowerCamelCase ) , """(0,1,0)""" )
def _A ( self ):
'''simple docstring'''
a__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCamelCase ) )
def _A ( self ):
'''simple docstring'''
a__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a__ = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(lowerCamelCase , lowerCamelCase ) )
def _A ( self ):
'''simple docstring'''
a__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a__ = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(lowerCamelCase , lowerCamelCase ) )
def _A ( self ):
'''simple docstring'''
a__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def _A ( self ):
'''simple docstring'''
a__ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
a__ = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def _A ( self ):
'''simple docstring'''
a__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCamelCase ) )
def _A ( self ):
'''simple docstring'''
a__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def _A ( self ):
'''simple docstring'''
a__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a__ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def _A ( self ):
'''simple docstring'''
a__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a__ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def _A ( self ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
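

# A quick interactive check mirroring the matrix-vector assertion in the suite
# above (it uses the same `lib` module the tests import from):
def _matrix_vector_demo():
    a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
    x = Vector([1, 2, 3])
    print(a * x)  # the suite expects the string "(14,32,50)"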
| 701 |
import math
def jump_search(arr: list, x: int) -> int:
    """Search sorted ``arr`` for ``x`` by jumping sqrt(n)-sized blocks, then scanning linearly."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
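

# Non-interactive example for the function above; jump search assumes a sorted
# array and takes O(sqrt(n)) steps:
def _jump_search_demo():
    data = [0, 1, 3, 5, 8, 13, 21, 34, 55]
    print(jump_search(data, 21))  # 6
    print(jump_search(data, 4))  # -1 (absent)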
| 412 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-Means clustering built from TensorFlow graph ops (pre-1.0 API)."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
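

# Invocation sketch with hypothetical data. Note the graph code above targets a
# pre-1.0 TensorFlow API (tf.sub, tf.placeholder, tf.initialize_all_variables),
# so it will not run on modern TensorFlow without tf.compat.v1-style shims.
def _kmeans_demo():
    from numpy import random

    vectors = [random.rand(2).astype("float64") for _ in range(10)]
    centroids, assignments = TFKMeansCluster(vectors, noofclusters=3)
    print(centroids, assignments)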
| 165 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
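

# The script above is normally started with the `accelerate` launcher, e.g.
# `accelerate launch <this_file> --gradient_accumulation_steps 4`. The effective
# optimizer batch size under the accumulation scheme is simply the product
# below (values hypothetical):
def _effective_batch_size_demo(batch_size=16, gradient_accumulation_steps=4, num_processes=2):
    print(batch_size * gradient_accumulation_steps * num_processes)  # 128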
| 680 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23,
                 max_length=11, is_training=True):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False
    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1)
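

# Shape sketch for DecisionTransformerModel inputs, matching the tester defaults
# above (batch=13, seq=7, state_dim=17, act_dim=6); requires torch. The model
# interleaves (return, state, action) per timestep, which is why the hidden
# state length asserted above is seq_length * 3.
def _dt_input_shapes_demo():
    batch_size, seq_length, state_dim, act_dim = 13, 7, 17, 6
    states = torch.randn(batch_size, seq_length, state_dim)
    actions = torch.randn(batch_size, seq_length, act_dim)
    returns_to_go = torch.randn(batch_size, seq_length, 1)
    print(states.shape, actions.shape, returns_to_go.shape)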
| 703 |
"""simple docstring"""
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
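

# Worked check for f(x) = x**2 on [0, 1] with 10 steps: the composite trapezoid
# rule gives 0.335 versus the exact 1/3, i.e. the expected O(h**2) error.
def _trapezoid_check():
    h = 0.1
    xs = [i * h for i in range(11)]
    approx = h * (xs[0] ** 2 / 2 + sum(x**2 for x in xs[1:-1]) + xs[-1] ** 2 / 2)
    print(approx)  # 0.335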
| 173 | 0 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Return (-1) ** Omega(number): -1 for an odd count of prime factors (with multiplicity), else 1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
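

# Quick examples, assuming prime_factors returns the factors with multiplicity
# (so 12 -> [2, 2, 3]); the function is then (-1) raised to that count, i.e. the
# completely multiplicative Liouville lambda:
def _liouville_demo():
    print(liouville_lambda(1))  # 1  (no prime factors)
    print(liouville_lambda(2))  # -1
    print(liouville_lambda(12))  # -1 (three factors: 2, 2, 3)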
| 614 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict)
        # clone model
        model_a.load_state_dict(model.state_dict())
        model_a.to(torch_device)
        model_a.enable_gradient_checkpointing()

        assert model_a.is_gradient_checkpointing and model_a.training

        out_a = model_a(**inputs_dict).sample
        # run the backwards pass the same way, backpropping on a mean of the output
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_a = dict(model_a.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5e-5))
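    # Gradient checkpointing trades compute for memory by re-running parts of the
    # forward pass during backward; the assertions above check that loss and
    # per-parameter gradients match the plain model within small tolerances.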
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_a, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_a, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
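# Note: the integration class above only runs under the slow test suite, e.g. with
# RUN_SLOW=1 pytest -k AutoencoderKL (the invocation is a sketch, not a guarantee).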
| 614 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524_288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token) -> int:
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
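# Usage sketch (requires the sentencepiece model, e.g. from the Hub):
#
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("Crime and Punishment")["input_ids"]
#   text = tokenizer.decode(ids)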
| 717 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
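# Quick sanity sketch (assumes `transformers` provides the base classes imported above):
#
#   config = BeitConfig(image_size=384)
#   assert config.model_type == "beit" and config.image_size == 384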
| 406 | 0 |
"""simple docstring"""
def is_palindrome(num) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(n) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f'{solution() = }')
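# Reference: with the default limit of 10_000 and the 50-iteration cap this solves
# Project Euler problem 55; the expected count of Lychrel candidates is 249.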
| 123 |
from collections import deque
def tarjan(g):
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
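    # Tarjan's algorithm visits every vertex and edge once, so it runs in O(V + E);
    # each strongly connected component is emitted when its root vertex is closed.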
| 184 | 0 |
"""simple docstring"""
def check_bouncy(n: int) -> bool:
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    # a number is "bouncy" when its digits are neither fully increasing nor fully decreasing
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'{solution(9_9)}')
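# Reference: this solves Project Euler problem 112; solution(99) is expected to
# return 1_587_000, the least number at which the bouncy proportion reaches 99%.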
| 222 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement: str) -> None:
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
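# Usage sketch (matches the helpers defined above):
#
#   require_version("numpy>=1.17")         # raises ImportError if numpy is too old
#   require_version("python>=3.8,<3.12")   # comma-separated ranges are supported
#   require_version_core("tokenizers")     # same check, plus a core-install hint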
| 222 | 1 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    '''simple docstring'''
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4

    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
    tense = "will be" if year > datetime.now().year else "was"
print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 400 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Splits `x` into sentences and rejoins them with newlines (useful for ROUGE-Lsum)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char; re.sub returns a new string
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 400 | 1 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIP tokenizer that can map one placeholder token to several learned vectors."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
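# Usage sketch (the CLIP checkpoint name is an assumption; any CLIP tokenizer works):
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)["input_ids"]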
| 558 | """simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_old_tokenizer_raises_exception(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 558 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find all instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        r"""Find all instances of print statements."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 367 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
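# Usage sketch (checkpoint names come from the maps defined above):
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions=["What is love?"], titles=["Haddaway"],
#                       texts=["'What Is Love' is a 1993 song."], return_tensors="pt")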
| 367 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    r"""Pipeline for unconditional image generation using latent diffusion."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
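# Usage sketch (the CelebA-HQ checkpoint is an assumption of a compatible LDM):
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]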
| 543 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seed all random number generators for reproducible training."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
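# Usage sketch for the EMA helper below (`model` and `loader` are placeholders):
#
#   ema = EMAModel(model.parameters(), decay=0.9999)
#   for batch in loader:
#       ...  # one optimizer step on `model`
#       ema.step(model.parameters())   # update the shadow weights
#   ema.copy_to(model.parameters())    # load averaged weights for evaluation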
class EMAModel:
    """Exponential Moving Average of model weights."""

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
@classmethod
def _UpperCAmelCase ( cls , __lowerCAmelCase , __lowerCAmelCase ):
        UpperCamelCase_ , UpperCamelCase_ : int = model_cls.load_config(__lowerCAmelCase , return_unused_kwargs=True )
UpperCamelCase_ : str = model_cls.from_pretrained(__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = cls(model.parameters() , model_cls=__lowerCAmelCase , model_config=model.config )
ema_model.load_state_dict(__lowerCAmelCase )
return ema_model
def _UpperCAmelCase ( self , __lowerCAmelCase ):
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
UpperCamelCase_ : int = self.model_cls.from_config(self.model_config )
UpperCamelCase_ : List[Any] = self.state_dict()
state_dict.pop("""shadow_params""" , __lowerCAmelCase )
model.register_to_config(**__lowerCAmelCase )
self.copy_to(model.parameters() )
model.save_pretrained(__lowerCAmelCase )
def _UpperCAmelCase ( self , __lowerCAmelCase ):
UpperCamelCase_ : Any = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
UpperCamelCase_ : List[Any] = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
UpperCamelCase_ : List[Any] = (1 + step) / (10 + step)
UpperCamelCase_ : Optional[Any] = min(__lowerCAmelCase , self.decay )
# make sure decay is not smaller than min_decay
UpperCamelCase_ : Optional[Any] = max(__lowerCAmelCase , self.min_decay )
return cur_decay_value
@torch.no_grad()
def _UpperCAmelCase ( self , __lowerCAmelCase ):
if isinstance(__lowerCAmelCase , torch.nn.Module ):
UpperCamelCase_ : str = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , __lowerCAmelCase , standard_warn=__lowerCAmelCase , )
UpperCamelCase_ : int = parameters.parameters()
UpperCamelCase_ : Optional[int] = list(__lowerCAmelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
UpperCamelCase_ : Any = self.get_decay(self.optimization_step )
UpperCamelCase_ : List[str] = decay
UpperCamelCase_ : Any = 1 - decay
UpperCamelCase_ : Optional[int] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , __lowerCAmelCase ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
                UpperCamelCase_ : Optional[Any] = deepspeed.zero.GatheredParameters(param , modifier_rank=None )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
                    s_param.copy_(param )
def _UpperCAmelCase ( self , __lowerCAmelCase ):
UpperCamelCase_ : str = list(__lowerCAmelCase )
for s_param, param in zip(self.shadow_params , __lowerCAmelCase ):
param.data.copy_(s_param.to(param.device ).data )
def _UpperCAmelCase ( self , __lowerCAmelCase=None , __lowerCAmelCase=None ):
UpperCamelCase_ : Union[str, Any] = [
p.to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ) if p.is_floating_point() else p.to(device=__lowerCAmelCase )
for p in self.shadow_params
]
def _UpperCAmelCase ( self ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def _UpperCAmelCase ( self , __lowerCAmelCase ):
UpperCamelCase_ : List[str] = [param.detach().cpu().clone() for param in parameters]
def _UpperCAmelCase ( self , __lowerCAmelCase ):
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , __lowerCAmelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
UpperCamelCase_ : List[Any] = None
def _UpperCAmelCase ( self , __lowerCAmelCase ):
UpperCamelCase_ : List[Any] = copy.deepcopy(__lowerCAmelCase )
UpperCamelCase_ : int = state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
UpperCamelCase_ : Dict = state_dict.get("""min_decay""" , self.min_decay )
        if not isinstance(self.min_decay , float ):
raise ValueError("""Invalid min_decay""" )
UpperCamelCase_ : str = state_dict.get("""optimization_step""" , self.optimization_step )
        if not isinstance(self.optimization_step , int ):
raise ValueError("""Invalid optimization_step""" )
UpperCamelCase_ : Dict = state_dict.get("""update_after_step""" , self.update_after_step )
        if not isinstance(self.update_after_step , int ):
raise ValueError("""Invalid update_after_step""" )
UpperCamelCase_ : List[str] = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , bool ):
raise ValueError("""Invalid use_ema_warmup""" )
UpperCamelCase_ : Any = state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
UpperCamelCase_ : str = state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
        UpperCamelCase_ : Dict = state_dict.get("""shadow_params""" , None )
if shadow_params is not None:
UpperCamelCase_ : Any = shadow_params
            if not isinstance(self.shadow_params , list ):
raise ValueError("""shadow_params must be a list""" )
            if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 543 | 1 |
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` from standard UK coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: one way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
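# --- Worked illustration (added; not part of the original file): for
# pence = 5 the DP table fills one coin at a time.
#   start:  ways = [1, 0, 0, 0, 0, 0]
#   coin 1: ways = [1, 1, 1, 1, 1, 1]
#   coin 2: ways = [1, 1, 2, 2, 3, 3]
#   coin 5: ways = [1, 1, 2, 2, 3, 4]   # 1+1+1+1+1, 1+1+1+2, 1+2+2, 5
assert solution(5) == 4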
| 297 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase : int = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
__lowerCamelCase : Dict = {
"""yjernite/retribert-base-uncased""": 512,
}
__lowerCamelCase : List[str] = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = RetriBertTokenizer
a_ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __A : Tuple=None , __A : List[Any]=None , __A : Optional[Any]=True , __A : List[Any]="[UNK]" , __A : Any="[SEP]" , __A : str="[PAD]" , __A : List[str]="[CLS]" , __A : str="[MASK]" , __A : Tuple=True , __A : str=None , **__A : Tuple , ):
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
snake_case__ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __A ) != do_lower_case
or normalizer_state.get("strip_accents" , __A ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __A ) != tokenize_chinese_chars
):
snake_case__ : List[str] = getattr(__A , normalizer_state.pop("type" ) )
snake_case__ : List[str] = do_lower_case
snake_case__ : Union[str, Any] = strip_accents
snake_case__ : Any = tokenize_chinese_chars
snake_case__ : Any = normalizer_class(**__A )
snake_case__ : List[Any] = do_lower_case
    def _lowercase ( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def _lowercase ( self : Optional[int] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ):
snake_case__ : int = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
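# --- Hedged usage sketch (added; not part of the original file): how the fast
# tokenizer above pairs two sequences, assuming a transformers version that
# still ships the (now deprecated) RetriBert model.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
enc = tok("how are you?", "fine, thanks")  # -> [CLS] a [SEP] b [SEP]
print(enc["input_ids"])
print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second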
| 297 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__SCREAMING_SNAKE_CASE : Dict =logging.getLogger()
def UpperCamelCase__ ( ):
lowercase = argparse.ArgumentParser()
parser.add_argument("""-f""" )
lowercase = parser.parse_args()
return args.f
class A_ ( __lowerCAmelCase ):
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowercase = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[str] ):
lowercase = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_UpperCamelCase , """argv""" , _UpperCamelCase ):
lowercase = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_UpperCamelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowercase = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_UpperCamelCase )
lowercase = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_UpperCamelCase )
lowercase = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_UpperCamelCase )
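# --- Minimal sketch (added; not part of the original file) of the argv-patch
# technique used by `run_and_check` above: temporarily replace `sys.argv` so
# that an argparse-based `main()` sees the flags we choose. The demo parser
# and flag are illustrative assumptions.
import argparse
import sys
from unittest.mock import patch

def demo_main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int, default=0)
    return parser.parse_args().seed

with patch.object(sys, "argv", ["demo.py", "--seed", "42"]):
    assert demo_main() == 42  # main() sees the patched CLI arguments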
| 715 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__SCREAMING_SNAKE_CASE : Tuple =get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures/vocab.json''')
__SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures''')
class A_ ( unittest.TestCase ):
_A :List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = 0
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig()
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in tokenizer
with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f:
lowercase = json.load(snake_case__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write(json.dumps(snake_case__ ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in feature extractor
with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f:
lowercase = json.load(snake_case__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write(json.dumps(snake_case__ ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(snake_case__ )
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write("""{}""" )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
lowercase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
lowercase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
lowercase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoFeatureExtractor.register(snake_case__ , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoProcessor.register(snake_case__ , snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(snake_case__ , """vocab.txt""" )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(snake_case__ )
lowercase = CustomProcessor(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(snake_case__ )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
class A_ ( __a ):
_A :List[str] = False
class A_ ( __a ):
_A :Dict = False
class A_ ( __a ):
_A :Union[str, Any] = '''AutoFeatureExtractor'''
_A :Tuple = '''AutoTokenizer'''
_A :Optional[Any] = False
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoFeatureExtractor.register(snake_case__ , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local classes.
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class A_ ( unittest.TestCase ):
_A :Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] ):
lowercase = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowercase = WavaVecaProcessor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case__ , """test-processor""" ) , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = WavaVecaProcessor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case__ , """test-processor-org""" ) , push_to_hub=snake_case__ , use_auth_token=self._token , organization="""valid_org""" , )
lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(snake_case__ , """vocab.txt""" )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(snake_case__ )
lowercase = CustomProcessor(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
lowercase = Repository(snake_case__ , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(snake_case__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(snake_case__ , """tokenizer_config.json""" ) ) as f:
lowercase = json.load(snake_case__ )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_processing.py""" ) ) )
repo.push_to_hub()
lowercase = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
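# --- Hedged sketch (added; not part of the original file) of the Auto-API
# registration pattern the tests above exercise: register a config type,
# then attach a processor class to it. All class names below are
# illustrative assumptions.
from transformers import AutoConfig, AutoProcessor, PretrainedConfig, ProcessorMixin

class MyCustomConfig(PretrainedConfig):
    model_type = "my-custom-model"

class MyCustomProcessor(ProcessorMixin):
    attributes = []  # a real processor would list e.g. "tokenizer" here

    def __init__(self):
        super().__init__()

AutoConfig.register("my-custom-model", MyCustomConfig)
AutoProcessor.register(MyCustomConfig, MyCustomProcessor)
# AutoProcessor.from_pretrained(...) now resolves "my-custom-model" checkpoints.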
| 72 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__UpperCAmelCase : int = random.Random()
def lowercase_ ( __snake_case : List[Any] , __snake_case : Optional[int]=1.0 , __snake_case : Tuple=None , __snake_case : Tuple=None ) -> List[str]:
'''simple docstring'''
if rng is None:
snake_case__ :Optional[int] = global_rng
snake_case__ :Any = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _snake_case ( unittest.TestCase ):
def __init__( self ,UpperCamelCase ,UpperCamelCase=7 ,UpperCamelCase=400 ,UpperCamelCase=2_000 ,UpperCamelCase=10 ,UpperCamelCase=160 ,UpperCamelCase=8 ,UpperCamelCase=0.0 ,UpperCamelCase=4_000 ,UpperCamelCase=False ,UpperCamelCase=True ,) -> Any:
snake_case__ :Optional[int] = parent
snake_case__ :Dict = batch_size
snake_case__ :Any = min_seq_length
snake_case__ :Union[str, Any] = max_seq_length
snake_case__ :Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case__ :Optional[Any] = padding_value
snake_case__ :int = sampling_rate
snake_case__ :Tuple = return_attention_mask
snake_case__ :str = do_normalize
snake_case__ :int = feature_size
snake_case__ :List[Any] = chunk_length
snake_case__ :Optional[Any] = hop_length
def lowerCAmelCase_ ( self ) -> str:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase_ ( self ,UpperCamelCase=False ,UpperCamelCase=False ) -> List[Any]:
def _flatten(UpperCamelCase ):
return list(itertools.chain(*UpperCamelCase ) )
if equal_length:
snake_case__ :List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case__ :Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
snake_case__ :List[Any] = [np.asarray(UpperCamelCase ) for x in speech_inputs]
return speech_inputs
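# --- Hedged usage sketch (added; not part of the original file): extracting
# log-mel input features with the real `WhisperFeatureExtractor`, mirroring
# what the tests below do with dummy audio. Default settings pad every clip
# to 30 s (3000 frames, 80 mel bins).
import numpy as np
from transformers import WhisperFeatureExtractor

fe = WhisperFeatureExtractor()
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
features = fe(audio, sampling_rate=16_000, return_tensors="np").input_features
print(features.shape)  # (1, 80, 3000)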
@require_torch
@require_torchaudio
class _snake_case ( _A , unittest.TestCase ):
_A = WhisperFeatureExtractor if is_speech_available() else None
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :Optional[Any] = WhisperFeatureExtractionTester(self )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ :Any = feat_extract_first.save_pretrained(UpperCamelCase )[0]
check_json_file_has_correct_format(UpperCamelCase )
snake_case__ :Dict = self.feature_extraction_class.from_pretrained(UpperCamelCase )
snake_case__ :str = feat_extract_first.to_dict()
snake_case__ :int = feat_extract_second.to_dict()
snake_case__ :Union[str, Any] = feat_extract_first.mel_filters
snake_case__ :Any = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCamelCase ,UpperCamelCase ) )
self.assertEqual(UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ :Any = os.path.join(UpperCamelCase ,"feat_extract.json" )
feat_extract_first.to_json_file(UpperCamelCase )
snake_case__ :Dict = self.feature_extraction_class.from_json_file(UpperCamelCase )
snake_case__ :Optional[int] = feat_extract_first.to_dict()
snake_case__ :Union[str, Any] = feat_extract_second.to_dict()
snake_case__ :Optional[Any] = feat_extract_first.mel_filters
snake_case__ :Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCamelCase ,UpperCamelCase ) )
self.assertEqual(UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case__ :Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case__ :Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
snake_case__ :List[str] = [np.asarray(UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
snake_case__ :List[Any] = feature_extractor(UpperCamelCase ,padding="max_length" ,return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
snake_case__ :Optional[int] = feature_extractor(speech_inputs[0] ,return_tensors="np" ).input_features
snake_case__ :Dict = feature_extractor(np_speech_inputs[0] ,return_tensors="np" ).input_features
self.assertTrue(np.allclose(UpperCamelCase ,UpperCamelCase ,atol=1E-3 ) )
# Test batched
snake_case__ :Optional[int] = feature_extractor(UpperCamelCase ,return_tensors="np" ).input_features
snake_case__ :Any = feature_extractor(UpperCamelCase ,return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase ,UpperCamelCase ):
self.assertTrue(np.allclose(UpperCamelCase ,UpperCamelCase ,atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
snake_case__ :int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
snake_case__ :Optional[int] = np.asarray(UpperCamelCase )
snake_case__ :int = feature_extractor(UpperCamelCase ,return_tensors="np" ).input_features
snake_case__ :Optional[int] = feature_extractor(UpperCamelCase ,return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase ,UpperCamelCase ):
self.assertTrue(np.allclose(UpperCamelCase ,UpperCamelCase ,atol=1E-3 ) )
# Test truncation required
snake_case__ :List[str] = [floats_list((1, x) )[0] for x in range(200 ,(feature_extractor.n_samples + 500) ,200 )]
snake_case__ :Union[str, Any] = [np.asarray(UpperCamelCase ) for speech_input in speech_inputs]
snake_case__ :Optional[Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
snake_case__ :Any = [np.asarray(UpperCamelCase ) for speech_input in speech_inputs_truncated]
snake_case__ :Dict = feature_extractor(UpperCamelCase ,return_tensors="np" ).input_features
snake_case__ :Dict = feature_extractor(UpperCamelCase ,return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase ,UpperCamelCase ):
self.assertTrue(np.allclose(UpperCamelCase ,UpperCamelCase ,atol=1E-3 ) )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
import torch
snake_case__ :Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ :Optional[int] = np.random.rand(100 ,32 ).astype(np.floataa )
snake_case__ :Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case__ :str = feature_extractor.pad([{"input_features": inputs}] ,return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
snake_case__ :List[str] = feature_extractor.pad([{"input_features": inputs}] ,return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[Any]:
snake_case__ :Dict = load_dataset("hf-internal-testing/librispeech_asr_dummy" ,"clean" ,split="validation" )
# automatic decoding with librispeech
snake_case__ :Optional[Any] = ds.sort("id" ).select(range(UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCAmelCase_ ( self ) -> int:
# fmt: off
snake_case__ :Optional[Any] = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
snake_case__ :Optional[Any] = self._load_datasamples(1 )
snake_case__ :Optional[int] = WhisperFeatureExtractor()
snake_case__ :Tuple = feature_extractor(UpperCamelCase ,return_tensors="pt" ).input_features
self.assertEqual(input_features.shape ,(1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] ,UpperCamelCase ,atol=1E-4 ) )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ :str = self._load_datasamples(1 )[0]
snake_case__ :List[str] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
snake_case__ :Any = feat_extract.zero_mean_unit_var_norm([audio] ,attention_mask=UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(UpperCamelCase ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(UpperCamelCase ) - 1 ) < 1E-3 ) )
| 241 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( _A , unittest.TestCase ):
_A = DDIMPipeline
_A = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_A = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'latents',
'callback',
'callback_steps',
}
_A = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_A = False
def lowerCAmelCase_ ( self ) -> Any:
torch.manual_seed(0 )
snake_case__ :List[str] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("DownBlock2D", "AttnDownBlock2D") ,up_block_types=("AttnUpBlock2D", "UpBlock2D") ,)
snake_case__ :int = DDIMScheduler()
snake_case__ :List[Any] = {"unet": unet, "scheduler": scheduler}
return components
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=0 ) -> Optional[Any]:
if str(UpperCamelCase ).startswith("mps" ):
snake_case__ :Dict = torch.manual_seed(UpperCamelCase )
else:
snake_case__ :Union[str, Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
snake_case__ :List[str] = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :Tuple = "cpu"
snake_case__ :int = self.get_dummy_components()
snake_case__ :str = self.pipeline_class(**UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Optional[Any] = self.get_dummy_inputs(UpperCamelCase )
snake_case__ :Dict = pipe(**UpperCamelCase ).images
snake_case__ :Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 32, 32, 3) )
snake_case__ :Tuple = np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
snake_case__ :Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase ,1E-3 )
def lowerCAmelCase_ ( self ) -> Any:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCAmelCase_ ( self ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3E-3 )
def lowerCAmelCase_ ( self ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def lowerCAmelCase_ ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
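# --- Hedged usage sketch (added; not part of the original file) of the
# pipeline under test: deterministic DDIM sampling from a pretrained DDPM
# UNet, mirroring the slow test below. Real library class names are used
# here (the imports above spell them with obfuscated names).
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
out = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy")
print(out.images.shape)  # (1, 32, 32, 3)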
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Optional[int] = "google/ddpm-cifar10-32"
snake_case__ :int = UNetaDModel.from_pretrained(UpperCamelCase )
snake_case__ :List[str] = DDIMScheduler()
snake_case__ :Any = DDIMPipeline(unet=UpperCamelCase ,scheduler=UpperCamelCase )
ddim.to(UpperCamelCase )
ddim.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Dict = torch.manual_seed(0 )
snake_case__ :Optional[Any] = ddim(generator=UpperCamelCase ,eta=0.0 ,output_type="numpy" ).images
snake_case__ :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ :str = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :int = "google/ddpm-ema-bedroom-256"
snake_case__ :Tuple = UNetaDModel.from_pretrained(UpperCamelCase )
snake_case__ :int = DDIMScheduler.from_pretrained(UpperCamelCase )
snake_case__ :Union[str, Any] = DDIMPipeline(unet=UpperCamelCase ,scheduler=UpperCamelCase )
ddpm.to(UpperCamelCase )
ddpm.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :int = torch.manual_seed(0 )
snake_case__ :Optional[int] = ddpm(generator=UpperCamelCase ,output_type="numpy" ).images
snake_case__ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case__ :Optional[int] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 241 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self ) -> str:
A__ = tempfile.mkdtemp()
A__ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
A__ = {
"do_resize": True,
"size": {"height": 224, "width": 224},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
"do_convert_rgb": True,
}
A__ = os.path.join(self.tmpdirname , _SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
return BertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> int:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def snake_case__ ( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self ) -> List[str]:
A__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case__ ( self ) -> Union[str, Any]:
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = self.get_image_processor()
A__ = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
A__ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
A__ = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
A__ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self ) -> List[str]:
A__ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
A__ = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE )
A__ = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self ) -> List[str]:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A__ = self.prepare_image_inputs()
A__ = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="np" )
A__ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self ) -> List[str]:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A__ = "Alexandra,T-shirt的价格是15便士。"
A__ = processor(text=_SCREAMING_SNAKE_CASE )
A__ = tokenizer(_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self ) -> Dict:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A__ = "Alexandra,T-shirt的价格是15便士。"
A__ = self.prepare_image_inputs()
A__ = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def snake_case__ ( self ) -> Tuple:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(_SCREAMING_SNAKE_CASE )
A__ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self ) -> List[Any]:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A__ = "Alexandra,T-shirt的价格是15便士。"
A__ = self.prepare_image_inputs()
A__ = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
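# --- Hedged usage sketch (added; not part of the original file): the joint
# text + image call pattern verified above, against a real checkpoint (the
# checkpoint id is an assumption for illustration; the text reuses the
# fixture string from the tests).
import numpy as np
from PIL import Image
from transformers import ChineseCLIPProcessor

proc = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
img = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
batch = proc(text="Alexandra,T-shirt的价格是15便士。", images=img, return_tensors="pt")
print(sorted(batch.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids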
| 709 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
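# --- Explanatory sketch (added; not part of the original file): a toy version
# of the `_LazyModule` idea used above -- submodules are imported only when one
# of their exported names is first accessed, keeping the top-level import
# cheap. This is a simplified assumption-level sketch, not the real class.
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # {submodule: [exported names]}

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")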
| 562 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = AudioLDMPipeline
snake_case = TEXT_TO_AUDIO_PARAMS
snake_case = TEXT_TO_AUDIO_BATCH_PARAMS
snake_case = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def _snake_case ( self )->str:
'''simple docstring'''
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowerCamelCase_ , )
A_ : Dict = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
A_ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : str = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
A_ : Optional[Any] = ClapTextModelWithProjection(lowerCamelCase_ )
A_ : Dict = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
A_ : List[str] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowerCamelCase_ , )
A_ : Tuple = SpeechTaHifiGan(lowerCamelCase_ )
A_ : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 )->str:
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('''mps''' ):
A_ : int = torch.manual_seed(lowerCamelCase_ )
else:
A_ : List[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
A_ : Optional[int] = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
A_ : Optional[int] = self.get_dummy_components()
A_ : str = AudioLDMPipeline(**lowerCamelCase_ )
A_ : Optional[int] = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : int = self.get_dummy_inputs(lowerCamelCase_ )
A_ : List[Any] = audioldm_pipe(**lowerCamelCase_ )
A_ : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) == 256
A_ : Optional[Any] = audio[:10]
A_ : List[str] = np.array(
[-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : Dict = self.get_dummy_components()
A_ : Tuple = AudioLDMPipeline(**lowerCamelCase_ )
A_ : Union[str, Any] = audioldm_pipe.to(lowerCamelCase_ )
A_ : List[Any] = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : Optional[Any] = self.get_dummy_inputs(lowerCamelCase_ )
A_ : Dict = 3 * [inputs['prompt']]
# forward
A_ : List[Any] = audioldm_pipe(**lowerCamelCase_ )
A_ : Optional[Any] = output.audios[0]
A_ : Tuple = self.get_dummy_inputs(lowerCamelCase_ )
A_ : Optional[Any] = 3 * [inputs.pop('''prompt''' )]
A_ : Any = audioldm_pipe.tokenizer(
lowerCamelCase_ , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors='''pt''' , )
A_ : Union[str, Any] = text_inputs['input_ids'].to(lowerCamelCase_ )
A_ : List[str] = audioldm_pipe.text_encoder(
lowerCamelCase_ , )
A_ : Optional[int] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
A_ : Dict = F.normalize(lowerCamelCase_ , dim=-1 )
A_ : Tuple = prompt_embeds
# forward
A_ : str = audioldm_pipe(**lowerCamelCase_ )
A_ : Any = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Any = self.get_dummy_components()
A_ : Optional[int] = AudioLDMPipeline(**lowerCamelCase_ )
A_ : Any = audioldm_pipe.to(lowerCamelCase_ )
A_ : List[str] = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : Any = self.get_dummy_inputs(lowerCamelCase_ )
A_ : Tuple = 3 * ['this is a negative prompt']
A_ : Optional[int] = negative_prompt
A_ : Optional[int] = 3 * [inputs['prompt']]
# forward
A_ : Optional[int] = audioldm_pipe(**lowerCamelCase_ )
A_ : Union[str, Any] = output.audios[0]
A_ : str = self.get_dummy_inputs(lowerCamelCase_ )
A_ : Tuple = 3 * [inputs.pop('''prompt''' )]
A_ : List[str] = []
for p in [prompt, negative_prompt]:
A_ : List[str] = audioldm_pipe.tokenizer(
lowerCamelCase_ , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors='''pt''' , )
A_ : Any = text_inputs['input_ids'].to(lowerCamelCase_ )
A_ : Union[str, Any] = audioldm_pipe.text_encoder(
lowerCamelCase_ , )
A_ : Union[str, Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
A_ : Optional[int] = F.normalize(lowerCamelCase_ , dim=-1 )
embeds.append(lowerCamelCase_ )
A_ : Optional[int] = embeds
# forward
A_ : int = audioldm_pipe(**lowerCamelCase_ )
A_ : str = output.audios[0]
assert np.abs(audio_1 - audio_2 ).max() < 1e-2
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
A_ : Optional[int] = self.get_dummy_components()
A_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
A_ : int = AudioLDMPipeline(**lowerCamelCase_ )
A_ : Union[str, Any] = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : Any = self.get_dummy_inputs(lowerCamelCase_ )
A_ : List[str] = 'egg cracking'
A_ : str = audioldm_pipe(**lowerCamelCase_ , negative_prompt=lowerCamelCase_ )
A_ : str = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) == 256
A_ : Tuple = audio[:10]
A_ : Any = np.array(
[-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def _snake_case ( self )->int:
'''simple docstring'''
A_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
A_ : List[Any] = self.get_dummy_components()
A_ : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
A_ : Dict = AudioLDMPipeline(**lowerCamelCase_ )
A_ : Dict = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : Dict = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
A_ : Tuple = audioldm_pipe(lowerCamelCase_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
A_ : List[str] = 2
A_ : Optional[int] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
A_ : Union[str, Any] = 2
A_ : List[Any] = audioldm_pipe(lowerCamelCase_ , num_inference_steps=2 , num_waveforms_per_prompt=lowerCamelCase_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
A_ : Tuple = 2
A_ : Any = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowerCamelCase_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : int = AudioLDMPipeline(**lowerCamelCase_ )
A_ : Optional[int] = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
A_ : List[str] = self.get_dummy_inputs(lowerCamelCase_ )
A_ : str = audioldm_pipe(audio_length_in_s=0.0_1_6 , **lowerCamelCase_ )
A_ : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) / vocoder_sampling_rate == 0.0_1_6
A_ : int = audioldm_pipe(audio_length_in_s=0.0_3_2 , **lowerCamelCase_ )
A_ : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) / vocoder_sampling_rate == 0.0_3_2
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Any = self.get_dummy_components()
A_ : List[Any] = AudioLDMPipeline(**lowerCamelCase_ )
A_ : Any = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : Optional[Any] = ['hey']
A_ : List[str] = audioldm_pipe(lowerCamelCase_ , num_inference_steps=1 )
A_ : Tuple = output.audios.shape
assert audio_shape == (1, 256)
A_ : Optional[Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
A_ : Tuple = SpeechTaHifiGan(lowerCamelCase_ ).to(lowerCamelCase_ )
A_ : Optional[int] = audioldm_pipe(lowerCamelCase_ , num_inference_steps=1 )
A_ : Dict = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def _snake_case ( self )->Tuple:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase_ )
def _snake_case ( self )->Any:
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCamelCase_ )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _snake_case ( self )->Any:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase_ )
@slow
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 )->Any:
'''simple docstring'''
A_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
A_ : List[Any] = np.random.RandomState(lowerCamelCase_ ).standard_normal((1, 8, 128, 16) )
A_ : Optional[int] = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ , dtype=lowerCamelCase_ )
A_ : int = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : str = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
A_ : Tuple = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : List[str] = self.get_inputs(lowerCamelCase_ )
A_ : Union[str, Any] = 25
A_ : str = audioldm_pipe(**lowerCamelCase_ ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) == 8_1920
A_ : Optional[int] = audio[7_7230:7_7240]
A_ : List[Any] = np.array(
[-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5] )
A_ : Optional[Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : str = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
A_ : Dict = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
A_ : str = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A_ : int = self.get_inputs(lowerCamelCase_ )
A_ : Optional[int] = audioldm_pipe(**lowerCamelCase_ ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) == 8_1920
A_ : Any = audio[2_7780:2_7790]
A_ : Dict = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2] )
A_ : Tuple = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 590 |
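# A self-contained sketch of the closeness checks used in the obfuscated
# tests above: L2-normalise embeddings over the last axis (as the pipeline
# does for CLAP text embeddings) and compare two waveforms element-wise.
# All tensors here are illustrative stand-ins, not real model outputs.
import numpy as np
import torch
import torch.nn.functional as F

embeds = torch.randn(2, 77, 512)
normed = F.normalize(embeds, dim=-1)  # each hidden state now has unit L2 norm
assert torch.allclose(normed.norm(dim=-1), torch.ones(2, 77), atol=1e-5)

audio_1 = np.zeros(256, dtype=np.float32)
audio_2 = np.full(256, 1e-3, dtype=np.float32)
assert np.abs(audio_1 - audio_2).max() < 1e-2  # same tolerance as the tests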
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    '''simple docstring'''
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    '''simple docstring'''
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    '''simple docstring'''
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 266 | 0 |
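# A quick smoke test for strongly_connected_components above, assuming the
# restored names; the expected groupings can be traced by hand from the two
# sample edge lists (list order follows the DFS visit order).
assert strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]]
assert strongly_connected_components(test_graph_2) == [[0, 2, 1], [3, 5, 4]]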
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ =logging.get_logger(__name__)
__magic_name__ ={
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _A ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : Dict ="vit_mae"
def __init__(self , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=224 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=2048 , SCREAMING_SNAKE_CASE_=0.75 , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = decoder_num_attention_heads
UpperCamelCase__ = decoder_hidden_size
UpperCamelCase__ = decoder_num_hidden_layers
UpperCamelCase__ = decoder_intermediate_size
UpperCamelCase__ = mask_ratio
UpperCamelCase__ = norm_pix_loss
| 469 | # Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 469 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
logging.set_verbosity_info()
def A__ ( snake_case_ : str , snake_case_ : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
SCREAMING_SNAKE_CASE__: Dict= XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= XLMProphetNetForConditionalGeneration.from_pretrained(
snake_case_ , output_loading_info=snake_case_ )
else:
SCREAMING_SNAKE_CASE__: int= ProphetNetForConditionalGenerationOld.from_pretrained(snake_case_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= ProphetNetForConditionalGeneration.from_pretrained(
snake_case_ , output_loading_info=snake_case_ )
SCREAMING_SNAKE_CASE__: Tuple= ['''key_proj''', '''value_proj''', '''query_proj''']
SCREAMING_SNAKE_CASE__: Optional[int]= {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
SCREAMING_SNAKE_CASE__: Optional[int]= key.split('''.''' )
if attributes[0] == "lm_head":
SCREAMING_SNAKE_CASE__: List[str]= prophet
SCREAMING_SNAKE_CASE__: List[Any]= prophet_old
else:
SCREAMING_SNAKE_CASE__: List[Any]= prophet.prophetnet
SCREAMING_SNAKE_CASE__: Dict= prophet_old.model
SCREAMING_SNAKE_CASE__: Tuple= False
for attribute in attributes:
if attribute in mapping:
SCREAMING_SNAKE_CASE__: Optional[Any]= mapping[attribute]
if not hasattr(snake_case_ , snake_case_ ) and len(snake_case_ ) > 0:
SCREAMING_SNAKE_CASE__: Optional[int]= attribute
elif hasattr(snake_case_ , snake_case_ ):
SCREAMING_SNAKE_CASE__: Optional[int]= attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
SCREAMING_SNAKE_CASE__: Tuple= old_model.weight
logger.info(F'{attribute} is initialized.' )
SCREAMING_SNAKE_CASE__: Optional[Any]= True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
SCREAMING_SNAKE_CASE__: Dict= old_model.bias
logger.info(F'{attribute} is initialized' )
SCREAMING_SNAKE_CASE__: Tuple= True
break
elif attribute in special_keys and hasattr(snake_case_ , '''in_proj_weight''' ):
SCREAMING_SNAKE_CASE__: Any= old_model.in_proj_weight.shape[0] // 3
SCREAMING_SNAKE_CASE__: List[str]= getattr(snake_case_ , snake_case_ )
            assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
            assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
SCREAMING_SNAKE_CASE__: Any= nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
SCREAMING_SNAKE_CASE__: Optional[Any]= nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
SCREAMING_SNAKE_CASE__: List[str]= nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
SCREAMING_SNAKE_CASE__: Dict= nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
SCREAMING_SNAKE_CASE__: Union[str, Any]= nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
SCREAMING_SNAKE_CASE__: str= nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
SCREAMING_SNAKE_CASE__: Union[str, Any]= True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
SCREAMING_SNAKE_CASE__: Dict= nn.Parameter(old_model.embed_positions.weight[:512, :] )
SCREAMING_SNAKE_CASE__: Any= True
break
if attribute.isdigit():
SCREAMING_SNAKE_CASE__: Tuple= model[int(snake_case_ )]
SCREAMING_SNAKE_CASE__: Tuple= old_model[int(snake_case_ )]
else:
SCREAMING_SNAKE_CASE__: List[str]= getattr(snake_case_ , snake_case_ )
if old_attribute == "":
SCREAMING_SNAKE_CASE__: List[Any]= old_model
else:
if not hasattr(snake_case_ , snake_case_ ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
SCREAMING_SNAKE_CASE__: int= getattr(snake_case_ , snake_case_ )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(snake_case_ )
if __name__ == "__main__":
lowercase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase_ : str = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 64 |
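# A minimal sketch of the in_proj splitting trick used in the conversion
# script above: a fused (3*d, d) attention projection is sliced into three
# (d, d) blocks for query/key/value. The shapes and random weights are
# illustrative assumptions, not real checkpoint contents.
import torch
from torch import nn

embed_dim = 8
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
in_proj_bias = torch.randn(3 * embed_dim)

q_proj = nn.Linear(embed_dim, embed_dim)
k_proj = nn.Linear(embed_dim, embed_dim)
v_proj = nn.Linear(embed_dim, embed_dim)

q_proj.weight = nn.Parameter(in_proj_weight[:embed_dim, :])
q_proj.bias = nn.Parameter(in_proj_bias[:embed_dim])
k_proj.weight = nn.Parameter(in_proj_weight[embed_dim : 2 * embed_dim, :])
k_proj.bias = nn.Parameter(in_proj_bias[embed_dim : 2 * embed_dim])
v_proj.weight = nn.Parameter(in_proj_weight[2 * embed_dim :, :])
v_proj.bias = nn.Parameter(in_proj_bias[2 * embed_dim :])

assert q_proj.weight.shape == (embed_dim, embed_dim)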
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    '''simple docstring'''
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 645 | 0 |
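# A brute-force cross-check for the windowed scan above: evaluate every
# 13-digit window directly with str_eval. The small-input assert is easy to
# verify by hand; 23514624000 is the published answer for the full 1000-digit N.
def brute_force(digits: str, width: int = 13) -> int:
    return max(str_eval(digits[i : i + width]) for i in range(len(digits) - width + 1))

assert brute_force('3675356291', width=5) == 3150
assert brute_force(N) == 23514624000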
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def __init__( self , __A , __A=7 , __A=3 , __A=18 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=None , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , ) -> Optional[int]:
_lowerCAmelCase =size if size is not None else {'shortest_edge': 18}
_lowerCAmelCase =crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =image_size
_lowerCAmelCase =min_resolution
_lowerCAmelCase =max_resolution
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =do_center_crop
_lowerCAmelCase =crop_size
_lowerCAmelCase =do_normalize
_lowerCAmelCase =image_mean
_lowerCAmelCase =image_std
def UpperCamelCase__ ( self ) -> Dict:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : int = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =LevitImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , 'image_mean' ) )
self.assertTrue(hasattr(__A , 'image_std' ) )
self.assertTrue(hasattr(__A , 'do_normalize' ) )
self.assertTrue(hasattr(__A , 'do_resize' ) )
self.assertTrue(hasattr(__A , 'do_center_crop' ) )
self.assertTrue(hasattr(__A , 'size' ) )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase__ ( self ) -> List[str]:
pass
def UpperCamelCase__ ( self ) -> Optional[Any]:
# Initialize image_processing
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase =image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ) -> int:
# Initialize image_processing
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase =image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ) -> int:
# Initialize image_processing
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase =image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 58 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 58 | 1 |
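# A runnable sketch of the dynamic-axes mapping built by the OnnxConfig
# above; the task string is an illustrative assumption.
from collections import OrderedDict

task = 'multiple-choice'
dynamic_axis = (
    {0: 'batch', 1: 'choice', 2: 'sequence'}
    if task == 'multiple-choice'
    else {0: 'batch', 1: 'sequence'}
)
inputs = OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
assert list(inputs) == ['input_ids', 'attention_mask']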
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """simple docstring"""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    """simple docstring"""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """simple docstring"""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """simple docstring"""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """simple docstring"""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """simple docstring"""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> list[int]:
    """simple docstring"""
    output: list[int] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[int]:
    """simple docstring"""
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[int]:
    """simple docstring"""
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> list[list[int]]:
    """simple docstring"""
    if root is None:
        return []
    output: list[list[int]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    """simple docstring"""
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 61 |
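# An iterative companion to the recursive zigzag above, using a deque of
# (node, depth) pairs; it assumes the Node dataclass and make_tree helper
# restored in the previous snippet.
from collections import deque

def zigzag_iterative(root):
    if root is None:
        return []
    levels = []
    queue = deque([(root, 0)])
    while queue:
        node, depth = queue.popleft()
        if depth == len(levels):
            levels.append([])
        levels[depth].append(node.data)
        if node.left:
            queue.append((node.left, depth + 1))
        if node.right:
            queue.append((node.right, depth + 1))
    return [lvl if d % 2 == 0 else lvl[::-1] for d, lvl in enumerate(levels)]

print(zigzag_iterative(make_tree()))  # expected: [[1], [3, 2], [4, 5]]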
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> None:
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 61 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
lowercase_ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def lowerCAmelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
config.addinivalue_line('''markers''', '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=UpperCAmelCase )
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
__magic_name__ : Tuple = tmp_path_factory.getbasetemp() / '''cache'''
__magic_name__ : Any = test_hf_cache_home / '''datasets'''
__magic_name__ : int = test_hf_cache_home / '''metrics'''
__magic_name__ : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''', str(UpperCAmelCase ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''', str(UpperCAmelCase ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''', str(UpperCAmelCase ) )
__magic_name__ : Optional[int] = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''', str(UpperCAmelCase ) )
__magic_name__ : Optional[int] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(UpperCAmelCase ) )
@pytest.fixture(autouse=UpperCAmelCase, scope='''session''' )
def lowerCAmelCase ( ) ->Dict:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=UpperCAmelCase )
def lowerCAmelCase ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''', UpperCAmelCase )
@pytest.fixture
def lowerCAmelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''', UpperCAmelCase )
| 721 |
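# A de-obfuscated sketch of the two collection hooks above, assuming the
# mangled parameters are the standard pytest (config, items) hook arguments.
import pytest

def pytest_collection_modifyitems(config, items):
    # default any test without an explicit integration/unit marker to "unit"
    for item in items:
        if any(marker in item.keywords for marker in ('integration', 'unit')):
            continue
        item.add_marker(pytest.mark.unit)

def pytest_configure(config):
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')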
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
lowercase_ = True
except (ImportError, AttributeError):
lowercase_ = object
def lowerCAmelCase ( *UpperCAmelCase, **UpperCAmelCase ) ->Any:
"""simple docstring"""
pass
lowercase_ = False
lowercase_ = logging.get_logger('''transformers-cli/serving''')
def lowerCAmelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
__magic_name__ : Optional[int] = pipeline(
task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
return ServeCommand(UpperCAmelCase, args.host, args.port, args.workers )
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : dict
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : List[str]
lowerCamelCase__ : Optional[List[int]]
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : str
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Any
class A__ ( __SCREAMING_SNAKE_CASE ):
@staticmethod
def lowercase ( lowerCamelCase ) -> Dict:
"""simple docstring"""
__magic_name__ : int = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=lowerCamelCase , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=lowerCamelCase , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=lowerCamelCase , default=8888 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=lowerCamelCase , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=lowerCamelCase , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=lowerCamelCase , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=lowerCamelCase , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=lowerCamelCase , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=lowerCamelCase )
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
__magic_name__ : List[str] = pipeline
__magic_name__ : Union[str, Any] = host
__magic_name__ : int = port
__magic_name__ : Optional[int] = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(F'''Serving model over {host}:{port}''' )
__magic_name__ : int = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=lowerCamelCase , response_class=lowerCamelCase , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=lowerCamelCase , response_class=lowerCamelCase , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=lowerCamelCase , response_class=lowerCamelCase , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=lowerCamelCase , response_class=lowerCamelCase , methods=['''POST'''] , ),
] , timeout=600 , )
def lowercase ( self ) -> Dict:
"""simple docstring"""
run(self._app , host=self.host , port=self.port , workers=self.workers )
def lowercase ( self ) -> str:
"""simple docstring"""
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowercase ( self , lowerCamelCase = Body(lowerCamelCase , embed=lowerCamelCase ) , lowerCamelCase = Body(lowerCamelCase , embed=lowerCamelCase ) ) -> Any:
"""simple docstring"""
try:
__magic_name__ : List[str] = self._pipeline.tokenizer.tokenize(lowerCamelCase )
if return_ids:
__magic_name__ : int = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCamelCase )
return ServeTokenizeResult(tokens=lowerCamelCase , tokens_ids=lowerCamelCase )
else:
return ServeTokenizeResult(tokens=lowerCamelCase )
except Exception as e:
raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(lowerCamelCase )} )
def lowercase ( self , lowerCamelCase = Body(lowerCamelCase , embed=lowerCamelCase ) , lowerCamelCase = Body(lowerCamelCase , embed=lowerCamelCase ) , lowerCamelCase = Body(lowerCamelCase , embed=lowerCamelCase ) , ) -> Any:
"""simple docstring"""
try:
__magic_name__ : Any = self._pipeline.tokenizer.decode(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return ServeDeTokenizeResult(model='''''' , text=lowerCamelCase )
except Exception as e:
raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(lowerCamelCase )} )
async def lowercase ( self , lowerCamelCase=Body(lowerCamelCase , embed=lowerCamelCase ) ) -> Optional[int]:
"""simple docstring"""
if len(lowerCamelCase ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__magic_name__ : Optional[int] = self._pipeline(lowerCamelCase )
return ServeForwardResult(output=lowerCamelCase )
except Exception as e:
raise HTTPException(500 , {'''error''': str(lowerCamelCase )} )
| 336 | 0 |
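# A minimal, self-contained sketch of the route wiring used by the serve
# command above: explicit APIRoute objects instead of decorators. The /echo
# endpoint is a hypothetical stand-in for the tokenize/forward handlers.
from fastapi import FastAPI
from fastapi.routing import APIRoute
from pydantic import BaseModel

class EchoResult(BaseModel):
    text: str

def echo(text: str) -> EchoResult:
    return EchoResult(text=text)

app = FastAPI(routes=[APIRoute('/echo', echo, response_model=EchoResult, methods=['POST'])])
# serve with: uvicorn.run(app, host='localhost', port=8888, workers=1)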
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=30_522, type=int)
    args = parser.parse_args()

    logger.info(f"""Loading data from {args.data_file}""")
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)

    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"""Dump to {args.token_counts_dump}""")
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 90 |
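# A tiny in-memory demo of the counting loop above, runnable without the
# pickled dump files; the id sequences are made-up stand-ins.
from collections import Counter

data = [[1, 2, 2], [2, 3]]
vocab_size = 5
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)
counts = [0] * vocab_size
for k, v in counter.items():
    counts[k] = v
assert counts == [0, 1, 3, 1, 0]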
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''') | 142 | 0 |
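# Quick sanity checks for solution() above: 6857 is the published answer
# for the default input, and 13195 = 5 * 7 * 13 * 29.
assert solution() == 6857
assert solution(13195) == 29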
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''', '''ms_deform_attn_cpu.cpp'''),
            os.path.join('''cuda''', '''ms_deform_attn_cuda.cu'''),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''',
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=['''-DWITH_CUDA=1'''],
        extra_cuda_cflags=[
            '''-DCUDA_HAS_FP16=1''',
            '''-D__CUDA_NO_HALF_OPERATORS__''',
            '''-D__CUDA_NO_HALF_CONVERSIONS__''',
            '''-D__CUDA_NO_HALF2_OPERATORS__''',
        ],
    )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 574 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 574 | 1 |
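# A minimal sketch of the idea behind _LazyModule above, using module-level
# __getattr__ (PEP 562): resolve a symbol from the import structure only
# when it is first accessed. The json mapping is an illustrative assumption.
import importlib

_lazy_structure = {'json': ['dumps', 'loads']}

def __getattr__(name):
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)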
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class lowercase ( lowercase__ ):
lowercase = '''facebook/nllb-200-distilled-600M'''
lowercase = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
'''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
lowercase = '''translator'''
lowercase = AutoTokenizer
lowercase = AutoModelForSeqaSeqLM
lowercase = LANGUAGE_CODES
lowercase = ['''text''', '''text''', '''text''']
lowercase = ['''text''']
def UpperCAmelCase (self : Dict ,SCREAMING_SNAKE_CASE_ : Dict ,SCREAMING_SNAKE_CASE_ : Dict ,SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(F"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(F"""{tgt_lang} is not a supported language.""" )
lowerCAmelCase = self.lang_to_code[src_lang]
lowerCAmelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
SCREAMING_SNAKE_CASE_ ,return_tensors='''pt''' ,src_lang=SCREAMING_SNAKE_CASE_ ,tgt_lang=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : str ,SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Any:
"""simple docstring"""
return self.model.generate(**SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : List[Any] ,SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() ,skip_special_tokens=SCREAMING_SNAKE_CASE_ )
| 535 |
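# A small, self-contained sketch of the language validation done by the
# tool above: plain-English names are mapped to NLLB codes, with a loud
# failure on unknown names. The two-entry table is a trimmed assumption.
LANG_TO_CODE = {'English': 'eng_Latn', 'Romanian': 'ron_Latn'}

def resolve(src_lang: str, tgt_lang: str) -> tuple:
    for lang in (src_lang, tgt_lang):
        if lang not in LANG_TO_CODE:
            raise ValueError(f'{lang} is not a supported language.')
    return (LANG_TO_CODE[src_lang], LANG_TO_CODE[tgt_lang])

assert resolve('English', 'Romanian') == ('eng_Latn', 'ron_Latn')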
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
UpperCAmelCase = logging.get_logger(__name__)
class lowercase ( lowercase__ ):
def __init__(self : str ,*SCREAMING_SNAKE_CASE_ : Any ,**SCREAMING_SNAKE_CASE_ : Optional[int] ) -> None:
"""simple docstring"""
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' ,SCREAMING_SNAKE_CASE_ ,)
super().__init__(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
| 535 | 1 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
A_ : int = None
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
A_ : Tuple = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
A_ : Union[str, Any] = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
class TaTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs, ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool('extra_id_' in str(x ) ), additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens' )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    f''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.', FutureWarning, )
        return max_model_length
    def save_vocabulary(self, save_directory, filename_prefix = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
            logger.info(f'''Copy vocab file to {out_vocab_file}''' )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def get_sentinel_tokens(self ):
        '''simple docstring'''
        return list(
            set(filter(lambda x : bool(re.search(r'<extra_id_\d+>', x ) ) is not None, self.additional_special_tokens ) ) )
    def get_sentinel_token_ids(self ):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
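A small, self-contained illustration of the EOS-appending logic implemented above, using plain Python lists in place of real token IDs (the integer IDs are made up for the example; real T5 uses EOS id 1 and no prefix tokens):

EOS_TOKEN_ID = 1
PREFIX_TOKENS: list = []  # T5 adds no prefix tokens

def build_inputs_with_special_tokens(token_ids_0, token_ids_1=None):
    # Mirror of the method above: append EOS to each segment and concatenate.
    token_ids_0 = token_ids_0 + [EOS_TOKEN_ID]
    if token_ids_1 is None:
        return PREFIX_TOKENS + token_ids_0
    return PREFIX_TOKENS + token_ids_0 + token_ids_1 + [EOS_TOKEN_ID]

assert build_inputs_with_special_tokens([10, 11]) == [10, 11, 1]
assert build_inputs_with_special_tokens([10, 11], [20]) == [10, 11, 1, 20, 1]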
| 696 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
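The `_LazyModule` registration above defers heavy imports until an attribute is first accessed. A dependency-free sketch of the same idea using a module-level `__getattr__` (PEP 562); the stdlib modules in `_import_structure` are placeholders standing in for heavyweight submodules:

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the owning module only on first access to one of its attributes.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return getattr(importlib.import_module(module_name), name)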
| 696 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self):
        """simple docstring"""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ))
        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')
        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, ))
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs( self, device, seed=0, pil_image=True):
        """simple docstring"""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none( self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs.update({'image_embeds': None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_attention_slicing_forward_pass( self):
        """simple docstring"""
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
    def test_inference_batch_single_identical( self):
        """simple docstring"""
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass( self):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img( self):
        """simple docstring"""
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy')
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turle', generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img( self):
        """simple docstring"""
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy')
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turle', generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self):
        """simple docstring"""
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            input_image, 'anime turtle', num_inference_steps=2, output_type='np', )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 89 |
from itertools import count
def solution(min_block_length: int = 50 ) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_00_00_00:
            break
    return n
if __name__ == "__main__":
print(f'''{solution() = }''')
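A brute-force cross-check of the fill-count recurrence above for small inputs. With a minimum block length of 3 and a row of 7 cells, the problem statement's worked value is 17 ways, which both the recurrence and direct enumeration should reproduce:

from itertools import product

def fill_counts(min_block_length, max_n):
    # Same recurrence as solution() above, returning counts for n <= max_n.
    fills = [1] * min_block_length
    for n in range(min_block_length, max_n + 1):
        fills.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fills[n] += fills[n - block_start - block_length - 1]
            fills[n] += 1
    return fills

def brute_force(min_block_length, n):
    # Count rows of n cells whose maximal runs of 1s all have length >= min_block_length.
    total = 0
    for row in product((0, 1), repeat=n):
        runs = "".join(map(str, row)).split("0")
        if all(len(r) == 0 or len(r) >= min_block_length for r in runs):
            total += 1
    return total

assert fill_counts(3, 7)[7] == brute_force(3, 7) == 17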
| 671 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case : Dict = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : int = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[Any] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 712 |
'''simple docstring'''
def solution(limit: int = 50000000 ) -> int:
    '''simple docstring'''
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for primea in primes:
        square = primea * primea
        for primeb in primes:
            cube = primeb * primeb * primeb
            if square + cube >= limit - 16:
                break
            for primec in primes:
                tetr = primec * primec * primec * primec
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 339 | 0 |
from __future__ import annotations
def prime_sieve(limit: int ) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(ceiling: int = 1_000_000 ) -> int:
    '''simple docstring'''
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'{solution() = }')
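Cross-checking against the values quoted in the problem statement: 41 = 2 + 3 + 5 + 7 + 11 + 13 is the longest consecutive-prime sum that is prime below one hundred, and 953 is the corresponding value below one thousand.

assert solution(100) == 41
assert solution(1_000) == 953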
| 266 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m' , '--pretrained_model_name_or_path' , type=str , default=None , required=True , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
    parser.add_argument(
        '-c' , '--caption' , type=str , default='robotic cat with wings' , help='Text used to generate images.' , )
    parser.add_argument(
        '-n' , '--images_num' , type=int , default=4 , help='How much images to generate.' , )
    parser.add_argument(
        '-s' , '--seed' , type=int , default=42 , help='Seed for random process.' , )
    parser.add_argument(
        '-ci' , '--cuda_id' , type=int , default=0 , help='cuda_id.' , )
    args = parser.parse_args()
    return args
def image_grid(imgs , rows , cols ):
    '''simple docstring'''
    if not len(imgs ) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.' )
    w , h = imgs[0].size
    grid = Image.new('RGB' , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def generate_images(pipeline , prompt='robotic cat with wings' , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    '''simple docstring'''
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, """unet""", unet)
else:
    unet = unet.to(torch.device("""cuda""", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
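A quick, dependency-light demonstration of the `image_grid` tiling helper above, using solid-color placeholder images instead of generated ones:

from PIL import Image

def image_grid(imgs, rows, cols):
    # Same tiling logic as above, repeated here so the demo is self-contained.
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid

tiles = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
demo = image_grid(tiles, rows=2, cols=2)
assert demo.size == (128, 128)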
| 266 | 1 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_multiple_size=4 , hidden_act="gelu" , hidden_dropout=0.0 , attention_dropout=0.1 , weight_tying=True , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        '''simple docstring'''
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ):
        '''simple docstring'''
        model = GPTNeoXJapaneseModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ):
        '''simple docstring'''
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ):
        '''simple docstring'''
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = GPTNeoXJapaneseModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXJapaneseConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask )
    def test_model_as_decoder( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_model_as_decoder_with_default_input_mask( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_decoder_model_past_large_inputs( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask )
    def test_model_for_causal_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
    def test_generation( self ):
        '''simple docstring'''
        model_id = 'abeja/gpt-neox-japanese-2.7b'
        prompts = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
        EXPECTED_OUTPUTS = [
            'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
            '100年後に必要とされる会社は、「人」が中心の会社です。',
            'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
            '国境の長いトンネルを抜けると、そこは雪国だった。',
            '美味しい日本食といえば、やっぱりお寿司ですよね。',
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id )
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id )
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt , return_tensors='pt' ).input_ids
            generated_ids = model.generate(input_ids , max_length=50 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
 | 711 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : List[str] =logging.get_logger(__name__)
lowerCamelCase : str ={'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCamelCase : Any ={
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
lowerCamelCase : Any ={
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
lowerCamelCase : Tuple ='''▁'''
class BarthezTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
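A self-contained illustration of the `<s> A </s></s> B </s>` pair layout that `build_inputs_with_special_tokens` above produces, with made-up integer IDs standing in for the real vocabulary:

CLS_ID, SEP_ID = 0, 2  # hypothetical ids for <s> and </s>

def build_inputs(token_ids_0, token_ids_1=None):
    # Single sequence: <s> X </s>; pair of sequences: <s> A </s></s> B </s>
    if token_ids_1 is None:
        return [CLS_ID] + token_ids_0 + [SEP_ID]
    return [CLS_ID] + token_ids_0 + [SEP_ID, SEP_ID] + token_ids_1 + [SEP_ID]

assert build_inputs([10, 11]) == [0, 10, 11, 2]
assert build_inputs([10], [20, 21]) == [0, 10, 2, 2, 20, 21, 2]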
| 237 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "sew"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , squeeze_factor=2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.0_2 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
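The property above computes how many raw audio samples map to one encoder frame: the product of the convolutional strides. A quick standalone check using the default stride tuple from the config above:

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
downsampling = functools.reduce(operator.mul, conv_stride, 1)
assert downsampling == 320  # one logit per 320 input samples, i.e. 20 ms at 16 kHz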
| 104 |
"""simple docstring"""
from __future__ import annotations
END = '#'
class Trie:
    '''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        self._trie: dict = {}
    def insert_word( self , text: str ):
        '''simple docstring'''
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True
    def find_word( self , prefix: str ):
        '''simple docstring'''
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie )
    def _elements( self , d: dict ):
        '''simple docstring'''
        result = []
        for c, v in d.items():
            sub_result = [''' '''] if c == END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )
trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)
def autocomplete_using_trie(string: str ) -> tuple:
    """simple docstring"""
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )
def main() -> None:
    """simple docstring"""
    print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
 | 522 | 0 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowercase ( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self ):
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
        tokens = tokenizer(**self.metas )["""input_ids"""]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self ):
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
        tokens = tokenizer(**self.metas )["""input_ids"""]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : Dict ={
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =[
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def _UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ):
"""simple docstring"""
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(__a , param_name='size' , default_to_square=__a )
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(__a , param_name='crop_size' , default_to_square=__a )
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A__ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A__ = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
A__ = [to_numpy_array(__a ) for image in images]
if do_resize:
A__ = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
A__ = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
A__ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
A__ = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
A__ = [to_channel_dimension_format(__a , __a ) for image in images]
A__ = {'pixel_values': images}
return BatchFeature(data=__a , tensor_type=__a )
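# Added usage example (not from the original file). The class above mirrors the
# CLIP-style image processor shipped with `transformers`; with the upstream
# `CLIPImageProcessor` the same resize -> crop -> rescale -> normalize pipeline
# runs as follows. The input image is a placeholder array.
from PIL import Image as _Image
import numpy as _np
from transformers import CLIPImageProcessor as _CLIPImageProcessor

_processor = _CLIPImageProcessor()  # defaults: shortest_edge 224, 224x224 crop
_image = _Image.fromarray(_np.zeros((256, 320, 3), dtype=_np.uint8))
_batch = _processor(images=_image, return_tensors="np")
print(_batch["pixel_values"].shape)  # (1, 3, 224, 224)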
| 260 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Return (shortest distance, path) between two cells of a binary grid.

    Cells holding 1 are walkable; movement is 4-connected unless
    ``allow_diagonal`` is set, in which case the four diagonals are added.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                # enqueue only walkable cells whose recorded distance improves
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
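# Added example (illustrative, not from the original file): shortest path across
# a fully walkable 3x3 grid with 4-connected moves, where every step costs 1.
if __name__ == "__main__":
    demo_grid = np.ones((3, 3), dtype=int)
    distance, path = dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=False)
    print(distance)  # 4.0
    print(path)      # [(0, 0), ..., (2, 2)], five cells long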
| 260 | 1 |
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return every value tied for the highest count, in sorted order."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # the maximum count in the input list
    # values whose count equals the maximum are the modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
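# Added example (illustrative, not from the original file): ties for the
# highest count are all returned, sorted.
if __name__ == "__main__":
    print(mode([2, 3, 4, 5, 3, 4]))  # [3, 4]
    print(mode([]))                  # []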
| 713 |
from sklearn.metrics import recall_score
import datasets
_lowercase : Any = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
_lowercase : int = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
_lowercase : Dict = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def _UpperCAmelCase ( self , a__ , a__ , a__=None , a__=1 , a__="binary" , a__=None , a__="warn" , ) -> Any:
A = recall_score(
a__ , a__ , labels=a__ , pos_label=a__ , average=a__ , sample_weight=a__ , zero_division=a__ , )
return {"recall": float(a__ ) if score.size == 1 else score}
| 546 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf config file, optionally pretty-printing it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a config and load its checkpoint onto `device`."""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints nest the weights under "state_dict"
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    """Encode then decode a batch, reporting the latent shape."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    """Resolve a dotted path such as `package.module.Class` to the object."""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the checkpoint if one is given, otherwise start without weights
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
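# Added usage sketch (illustrative; requires real checkpoint and config files
# at the default paths assumed by `load_vqgan` above):
#
#     model = load_vqgan(torch.device("cpu"))
#     x = torch.zeros(1, 3, 256, 256)           # placeholder image batch
#     x_rec = reconstruct_with_vqgan(x, model)  # prints the latent shape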
| 351 |
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores query tokens against support-set span markers."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            # embeddings of the entity start/end marker tokens in the support set
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
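# Added usage sketch (illustrative; downloads the pretrained checkpoint on
# first use). `W_query` and `W_supports` are tokenized batches; the support
# batch must additionally carry "sizes", "start_token_id" and "end_token_id"
# entries, as consumed by forward() above.
#
#     model = FSNERModel()
#     p_starts, p_ends = model(W_query, W_supports)  # per-token span scores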
| 602 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __lowerCamelCase ( __a : Tuple , __a : Optional[Any]=False ) -> Optional[int]:
_lowercase =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowercase =[(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __lowerCamelCase ( __a : str , __a : Optional[Any] , __a : Optional[int]=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
_lowercase =""
else:
_lowercase ="vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase =state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_lowercase =state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase =in_proj_weight[
: config.hidden_size, :
]
_lowercase =in_proj_bias[: config.hidden_size]
_lowercase =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase =in_proj_weight[
-config.hidden_size :, :
]
_lowercase =in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( __a : Tuple ) -> Dict:
_lowercase =["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__a , __a )
def __lowerCamelCase ( __a : Dict , __a : Optional[Any] , __a : List[str] ) -> Optional[int]:
_lowercase =dct.pop(__a )
_lowercase =val
def __lowerCamelCase ( ) -> int:
_lowercase ="http://images.cocodataset.org/val2017/000000039769.jpg"
_lowercase =Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( __a : List[str] , __a : Any , __a : Optional[Any]=True ) -> Union[str, Any]:
_lowercase =ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowercase =8
# set labels if required
if not base_model:
_lowercase =1_000
_lowercase ="huggingface/label-files"
_lowercase ="imagenet-1k-id2label.json"
_lowercase =json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) )
_lowercase ={int(__a ): v for k, v in idalabel.items()}
_lowercase =idalabel
_lowercase ={v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowercase =384
_lowercase =1_536
_lowercase =12
_lowercase =6
# load original model from torch hub
_lowercase =torch.hub.load("facebookresearch/dino:main" , __a )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowercase =original_model.state_dict()
if base_model:
remove_classification_head_(__a )
_lowercase =create_rename_keys(__a , base_model=__a )
for src, dest in rename_keys:
rename_key(__a , __a , __a )
read_in_q_k_v(__a , __a , __a )
# load HuggingFace model
if base_model:
_lowercase =ViTModel(__a , add_pooling_layer=__a ).eval()
else:
_lowercase =ViTForImageClassification(__a ).eval()
model.load_state_dict(__a )
# Check outputs on an image, prepared by ViTImageProcessor
_lowercase =ViTImageProcessor()
_lowercase =image_processor(images=prepare_img() , return_tensors="pt" )
_lowercase =encoding["pixel_values"]
_lowercase =model(__a )
if base_model:
_lowercase =original_model(__a )
assert torch.allclose(__a , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_lowercase =original_model(__a )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__a , outputs.logits , atol=1E-3 )
Path(__a ).mkdir(exist_ok=__a )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__a )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__a )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
lowerCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
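# Added example invocation (the script file name is hypothetical; the flags
# match the argparse definition above, and --base_model defaults to True):
#
#     python convert_dino_to_vit.py --model_name dino_vitb16 \
#         --pytorch_dump_folder_path ./dino_vitb16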
| 594 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __lowerCamelCase ( ) -> Tuple:
_lowercase =torch.nn.Linear(2 , 4 )
_lowercase =torch.optim.AdamW(model.parameters() , lr=1.0 )
_lowercase =torch.optim.lr_scheduler.OneCycleLR(__a , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
_lowercase =DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
_lowercase =DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def __lowerCamelCase ( __a : Tuple ) -> Optional[int]:
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def __lowerCamelCase ( __a : str ) -> Tuple:
_lowercase =torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(__a )
class _a ( lowerCamelCase_ ):
"""simple docstring"""
@require_cuda
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowerCAmelCase_ ):
_lowercase =Accelerator(cpu=lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase =GradientState()
assert state.num_steps == 1
_lowercase =4
assert state.num_steps == 4
assert state.sync_gradients is True
_lowercase =False
assert state.sync_gradients is False
GradientState._reset_state()
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =create_components()
accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __lowerCAmelCase ( self ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*lowerCAmelCase_ , **lowerCAmelCase_ ):
pass
with patch("torch.cuda.set_device" , lowerCAmelCase_ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
_lowercase =Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =create_components()
accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_lowercase =get_signature(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase_ )
# make sure random weights don't match
load_random_weights(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) < 1e-3 )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =create_components()
accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_lowercase =get_signature(lowerCAmelCase_ )
# saving hook
def save_config(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase ={"class_name": models[0].__class__.__name__}
with open(os.path.join(lowerCAmelCase_ , "data.json" ) , "w" ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
# loading hook
def load_config(lowerCAmelCase_ , lowerCAmelCase_ ):
with open(os.path.join(lowerCAmelCase_ , "data.json" ) , "r" ) as f:
_lowercase =json.load(lowerCAmelCase_ )
_lowercase =config["class_name"]
_lowercase =accelerator.register_save_state_pre_hook(lowerCAmelCase_ )
_lowercase =accelerator.register_load_state_pre_hook(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase_ )
# make sure random weights don't match with hooks
load_random_weights(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) > 1e-3 )
# random class name to verify correct one is loaded
_lowercase ="random"
# make sure loaded weights match with hooks
accelerator.load_state(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase_ )
# make sure random weights don't match with hooks removed
load_random_weights(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) > 1e-3 )
# random class name to verify correct one is loaded
_lowercase ="random"
# make sure loaded weights match with hooks removed
accelerator.load_state(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =create_components()
_lowercase =None
# This should work
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase =accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
self.assertTrue(dummy_obj is None )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =create_components()
_lowercase =[1, 2, 3]
# This should work
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase =accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def __lowerCAmelCase ( self ):
from transformers import AutoModelForCausalLM
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCAmelCase_ , device_map={"": 0} , )
_lowercase =Accelerator()
# This should work
_lowercase =accelerator.prepare(lowerCAmelCase_ )
@slow
@require_bnb
def __lowerCAmelCase ( self ):
from transformers import AutoModelForCausalLM
_lowercase =Accelerator()
with init_empty_weights():
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
_lowercase =infer_auto_device_map(lowerCAmelCase_ )
_lowercase ="cpu"
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , llm_inta_enable_fpaa_cpu_offload=lowerCAmelCase_ )
# This should not work and get value error
with self.assertRaises(lowerCAmelCase_ ):
_lowercase =accelerator.prepare(lowerCAmelCase_ )
@slow
@require_bnb
@require_multi_gpu
def __lowerCAmelCase ( self ):
from transformers import AutoModelForCausalLM
_lowercase ={"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
_lowercase =infer_auto_device_map(lowerCAmelCase_ )
_lowercase =1
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCAmelCase_ , device_map=lowerCAmelCase_ , )
_lowercase =Accelerator()
# This should not work and get value error
with self.assertRaises(lowerCAmelCase_ ):
_lowercase =accelerator.prepare(lowerCAmelCase_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __lowerCAmelCase ( self ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
_lowercase =infer_auto_device_map(lowerCAmelCase_ )
_lowercase =1
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCAmelCase_ , device_map=lowerCAmelCase_ , )
_lowercase =Accelerator()
# This should work
_lowercase =accelerator.prepare(lowerCAmelCase_ )
@require_cuda
def __lowerCAmelCase ( self ):
_lowercase =torch.nn.Linear(10 , 10 )
_lowercase =torch.optim.SGD(model.parameters() , lr=0.0_1 )
_lowercase =Accelerator(cpu=lowerCAmelCase_ )
_lowercase =accelerator.prepare(lowerCAmelCase_ )
| 594 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
snake_case__ : int = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self , **_UpperCAmelCase ) -> Union[str, Any]:
super().__init__(**_UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(_UpperCAmelCase )
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> Union[str, Any]:
if "text_queries" in kwargs:
UpperCamelCase_ = kwargs.pop('text_queries' )
if isinstance(_UpperCAmelCase , (str, Image.Image) ):
UpperCamelCase_ = {'image': image, 'candidate_labels': candidate_labels}
else:
UpperCamelCase_ = image
UpperCamelCase_ = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
return results
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = {}
if "threshold" in kwargs:
UpperCamelCase_ = kwargs['threshold']
if "top_k" in kwargs:
UpperCamelCase_ = kwargs['top_k']
return {}, {}, postprocess_params
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Optional[Any]:
UpperCamelCase_ = load_image(inputs['image'] )
UpperCamelCase_ = inputs['candidate_labels']
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase_ = candidate_labels.split(',' )
UpperCamelCase_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(_UpperCAmelCase ):
UpperCamelCase_ = self.tokenizer(_UpperCAmelCase , return_tensors=self.framework )
UpperCamelCase_ = self.image_processor(_UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(_UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> str:
UpperCamelCase_ = model_inputs.pop('target_size' )
UpperCamelCase_ = model_inputs.pop('candidate_label' )
UpperCamelCase_ = model_inputs.pop('is_last' )
UpperCamelCase_ = self.model(**_UpperCAmelCase )
UpperCamelCase_ = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0.1 , _UpperCAmelCase=None ) -> List[str]:
UpperCamelCase_ = []
for model_output in model_outputs:
UpperCamelCase_ = model_output['candidate_label']
UpperCamelCase_ = BaseModelOutput(_UpperCAmelCase )
UpperCamelCase_ = self.image_processor.post_process_object_detection(
outputs=_UpperCAmelCase , threshold=_UpperCAmelCase , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
UpperCamelCase_ = outputs['scores'][index].item()
UpperCamelCase_ = self._get_bounding_box(outputs['boxes'][index][0] )
UpperCamelCase_ = {'score': score, 'label': label, 'box': box}
results.append(_UpperCAmelCase )
UpperCamelCase_ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )
if top_k:
UpperCamelCase_ = results[:top_k]
return results
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = box.int().tolist()
UpperCamelCase_ = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
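# Added usage sketch (mirrors the public `transformers` zero-shot object
# detection pipeline this class implements; the checkpoint name is
# illustrative and is downloaded on first use):
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ...}}, ...]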
| 23 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A ( __snake_case: Tuple ) -> Optional[Any]:
"""simple docstring"""
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def A ( __snake_case: str ) -> Optional[Any]:
"""simple docstring"""
for char in word:
__magic_name__ = ord(__snake_case )
if not _is_chinese_char(__snake_case ):
return 0
return 1
def A ( __snake_case: List[str] ) -> List[str]:
"""simple docstring"""
__magic_name__ = set()
for token in tokens:
__magic_name__ = len(__snake_case ) > 1 and is_chinese(__snake_case )
if chinese_word:
word_set.add(__snake_case )
__magic_name__ = list(__snake_case )
return word_list
def A ( __snake_case: List[str] , __snake_case: set() ) -> str:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
__magic_name__ = max([len(__snake_case ) for w in chinese_word_set] )
__magic_name__ = bert_tokens
__magic_name__ , __magic_name__ = 0, len(__snake_case )
while start < end:
__magic_name__ = True
if is_chinese(bert_word[start] ):
__magic_name__ = min(end - start , __snake_case )
for i in range(__snake_case , 1 , -1 ):
__magic_name__ = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__magic_name__ = '##' + bert_word[j]
__magic_name__ = start + i
__magic_name__ = False
break
if single_word:
start += 1
return bert_word
def A ( __snake_case: List[str] , __snake_case: LTP , __snake_case: BertTokenizer ) -> List[Any]:
"""simple docstring"""
__magic_name__ = []
for i in range(0 , len(__snake_case ) , 1_0_0 ):
__magic_name__ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws
__magic_name__ = [get_chinese_word(__snake_case ) for r in res]
ltp_res.extend(__snake_case )
assert len(__snake_case ) == len(__snake_case )
__magic_name__ = []
for i in range(0 , len(__snake_case ) , 1_0_0 ):
__magic_name__ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=__snake_case , truncation=__snake_case , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(__snake_case ) == len(__snake_case )
__magic_name__ = []
for input_ids, chinese_word in zip(__snake_case , __snake_case ):
__magic_name__ = []
for id in input_ids:
__magic_name__ = bert_tokenizer._convert_id_to_token(__snake_case )
input_tokens.append(__snake_case )
__magic_name__ = add_sub_symbol(__snake_case , __snake_case )
__magic_name__ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__snake_case ):
if token[:2] == "##":
__magic_name__ = token[2:]
# save chinese tokens' pos
if len(__snake_case ) == 1 and _is_chinese_char(ord(__snake_case ) ):
ref_id.append(__snake_case )
ref_ids.append(__snake_case )
assert len(__snake_case ) == len(__snake_case )
return ref_ids
def A ( __snake_case: Optional[Any] ) -> List[str]:
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
__magic_name__ = f.readlines()
__magic_name__ = [line.strip() for line in data if len(__snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__magic_name__ = LTP(args.ltp ) # faster in GPU device
__magic_name__ = BertTokenizer.from_pretrained(args.bert )
__magic_name__ = prepare_ref(__snake_case , __snake_case , __snake_case )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
__magic_name__ = [json.dumps(__snake_case ) + '\n' for ref in ref_ids]
f.writelines(__snake_case )
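# Added example invocation (the script file name is hypothetical; the paths
# are the argparse defaults declared in the __main__ block that follows):
#
#     python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt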
if __name__ == "__main__":
snake_case : Tuple = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
snake_case : Any = parser.parse_args()
    main(args)
| 545 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
A__ = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def _lowercase ( a_ : Dict ,a_ : Any ,a_ : str=None ) -> Tuple:
'''simple docstring'''
if rng is None:
__magic_name__ = random.Random()
__magic_name__ = 1
for dim in shape:
total_dims *= dim
__magic_name__ = []
for _ in range(a_ ):
values.append(rng.randint(0 ,vocab_size - 1 ) )
__magic_name__ = np.array(a_ ,dtype=jnp.intaa ).reshape(a_ )
return output
def _lowercase ( a_ : Dict ,a_ : List[str]=None ) -> Dict:
'''simple docstring'''
__magic_name__ = ids_tensor(a_ ,vocab_size=2 ,rng=a_ )
# make sure that at least one token is attended to for each batch
__magic_name__ = 1
return attn_mask
@require_flax
class __UpperCamelCase :
_lowercase : List[Any] = None
_lowercase : Optional[Any] = ()
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
__magic_name__, __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__magic_name__ = 2
__magic_name__ = inputs['input_ids'].shape[-1] // 2
__magic_name__ = inputs['input_ids'][:max_batch_size, :sequence_length]
__magic_name__ = jnp.ones_like(__UpperCamelCase )
__magic_name__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__magic_name__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__magic_name__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = self._get_input_ids_and_config()
__magic_name__ = False
__magic_name__ = max_length
__magic_name__ = 0
for model_class in self.all_generative_model_classes:
__magic_name__ = model_class(__UpperCamelCase )
__magic_name__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
__magic_name__ = getattr(__UpperCamelCase , __UpperCamelCase )
__magic_name__ = pt_model_class(__UpperCamelCase ).eval()
__magic_name__ = load_flax_weights_in_pytorch_model(__UpperCamelCase , flax_model.params )
__magic_name__ = flax_model.generate(__UpperCamelCase ).sequences
__magic_name__ = pt_model.generate(torch.tensor(__UpperCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__magic_name__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = self._get_input_ids_and_config()
__magic_name__ = False
__magic_name__ = max_length
for model_class in self.all_generative_model_classes:
__magic_name__ = model_class(__UpperCamelCase )
__magic_name__ = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
__magic_name__ = jit(model.generate )
__magic_name__ = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = self._get_input_ids_and_config()
__magic_name__ = True
__magic_name__ = max_length
for model_class in self.all_generative_model_classes:
__magic_name__ = model_class(__UpperCamelCase )
__magic_name__ = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
__magic_name__ = jit(model.generate )
__magic_name__ = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = self._get_input_ids_and_config()
__magic_name__ = False
__magic_name__ = max_length
__magic_name__ = 2
for model_class in self.all_generative_model_classes:
__magic_name__ = model_class(__UpperCamelCase )
__magic_name__ = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
__magic_name__ = jit(model.generate )
__magic_name__ = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE ( self: List[str] ):
'''simple docstring'''
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = self._get_input_ids_and_config()
__magic_name__ = False
__magic_name__ = max_length
__magic_name__ = 2
__magic_name__ = 2
for model_class in self.all_generative_model_classes:
__magic_name__ = model_class(__UpperCamelCase )
__magic_name__ = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = self._get_input_ids_and_config()
__magic_name__ = True
__magic_name__ = max_length
__magic_name__ = 0.8
__magic_name__ = 10
__magic_name__ = 0.3
__magic_name__ = 1
__magic_name__ = 8
__magic_name__ = 9
for model_class in self.all_generative_model_classes:
__magic_name__ = model_class(__UpperCamelCase )
__magic_name__ = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
__magic_name__ = jit(model.generate )
__magic_name__ = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
'''simple docstring'''
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = self._get_input_ids_and_config()
__magic_name__ = max_length
__magic_name__ = 1
__magic_name__ = 8
__magic_name__ = 9
for model_class in self.all_generative_model_classes:
__magic_name__ = model_class(__UpperCamelCase )
__magic_name__ = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
__magic_name__ = jit(model.generate )
__magic_name__ = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
'''simple docstring'''
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = self._get_input_ids_and_config()
__magic_name__ = max_length
__magic_name__ = 2
__magic_name__ = 1
__magic_name__ = 8
__magic_name__ = 9
for model_class in self.all_generative_model_classes:
__magic_name__ = model_class(__UpperCamelCase )
__magic_name__ = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
__magic_name__ = jit(model.generate )
__magic_name__ = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE ( self: Optional[int] ):
'''simple docstring'''
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = self._get_input_ids_and_config()
# pad attention mask on the left
__magic_name__ = attention_mask.at[(0, 0)].set(0 )
__magic_name__ = False
__magic_name__ = max_length
for model_class in self.all_generative_model_classes:
__magic_name__ = model_class(__UpperCamelCase )
__magic_name__ = model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
__magic_name__ = jit(model.generate )
__magic_name__ = jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE ( self: str ):
'''simple docstring'''
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = self._get_input_ids_and_config()
# pad attention mask on the left
__magic_name__ = attention_mask.at[(0, 0)].set(0 )
__magic_name__ = True
__magic_name__ = max_length
for model_class in self.all_generative_model_classes:
__magic_name__ = model_class(__UpperCamelCase )
__magic_name__ = model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
__magic_name__ = jit(model.generate )
__magic_name__ = jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
'''simple docstring'''
__magic_name__, __magic_name__, __magic_name__, __magic_name__ = self._get_input_ids_and_config()
# pad attention mask on the left
__magic_name__ = attention_mask.at[(0, 0)].set(0 )
__magic_name__ = 2
__magic_name__ = max_length
for model_class in self.all_generative_model_classes:
__magic_name__ = model_class(__UpperCamelCase )
__magic_name__ = model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
__magic_name__ = jit(model.generate )
__magic_name__ = jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
__magic_name__ = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
__magic_name__ = 'Hello world'
__magic_name__ = tokenizer(__UpperCamelCase , return_tensors='np' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__UpperCamelCase , 'do_samples' ):
model.generate(__UpperCamelCase , do_samples=__UpperCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__UpperCamelCase , 'foo' ):
__magic_name__ = {'foo': 'bar'}
model.generate(__UpperCamelCase , **__UpperCamelCase )
| 184 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
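# Added example (illustrative; instantiating the config defined above):
#
#     config = TimeSeriesTransformerConfig(prediction_length=24)
#     config.context_length  # falls back to prediction_length -> 24
#     config.feature_size    # input_size * len(lags_sequence) + extra features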
| 184 | 1 |
"""simple docstring"""
import numpy as np
class Cell:
    """A single grid cell tracked by the A* search."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the in-bounds 8-connected neighbours of `cell`."""
        neighbour_coords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_coords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            if n in _closed:
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # squared Euclidean distance to the goal serves as the heuristic
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            if n not in _open:
                _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # mark the path on the grid, just for visual reasons
    for i in s:
        world.w[i] = 1
    print(world.w)
| 473 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """Configuration for a Masked BERT model, adding pruning hyper-parameters."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 473 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowercase__( snake_case__ ):
'''simple docstring'''
snake_case__ = 4_2
class lowercase__( snake_case__ , snake_case__ ):
'''simple docstring'''
snake_case__ = True
@register_to_config
def __init__( self , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = ("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE = ("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE = (64,) , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = "silu" , __SCREAMING_SNAKE_CASE = 4 , __SCREAMING_SNAKE_CASE = 32 , __SCREAMING_SNAKE_CASE = 32 , __SCREAMING_SNAKE_CASE = 0.1_82_15 , ) -> List[str]:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
UpperCamelCase__ : Any =Encoder(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , down_block_types=__SCREAMING_SNAKE_CASE , block_out_channels=__SCREAMING_SNAKE_CASE , layers_per_block=__SCREAMING_SNAKE_CASE , act_fn=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , double_z=__SCREAMING_SNAKE_CASE , )
# pass init params to Decoder
UpperCamelCase__ : List[Any] =Decoder(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , up_block_types=__SCREAMING_SNAKE_CASE , block_out_channels=__SCREAMING_SNAKE_CASE , layers_per_block=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , act_fn=__SCREAMING_SNAKE_CASE , )
        UpperCamelCase__ : Dict =nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1)
        UpperCamelCase__ : Dict =nn.Conv2d(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1)
UpperCamelCase__ : str =False
UpperCamelCase__ : str =False
# only relevant if vae tiling is enabled
UpperCamelCase__ : Optional[Any] =self.config.sample_size
UpperCamelCase__ : Union[str, Any] =(
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple))
else self.config.sample_size
)
UpperCamelCase__ : Optional[Any] =int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
UpperCamelCase__ : Any =0.25
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False) -> Optional[int]:
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , (Encoder, Decoder)):
UpperCamelCase__ : int =value
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE = True) -> Dict:
"""simple docstring"""
UpperCamelCase__ : int =use_tiling
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
self.enable_tiling(__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self) -> int:
"""simple docstring"""
UpperCamelCase__ : Tuple =True
def UpperCAmelCase ( self) -> str:
"""simple docstring"""
UpperCamelCase__ : str =False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase ( self) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] ={}
def fn_recursive_add_processors(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
if hasattr(__SCREAMING_SNAKE_CASE , "set_processor"):
UpperCamelCase__ : Any =module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return processors
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
UpperCamelCase__ : List[str] =len(self.attn_processors.keys())
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and len(__SCREAMING_SNAKE_CASE) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(__SCREAMING_SNAKE_CASE)} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''')
def fn_recursive_attn_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
if hasattr(__SCREAMING_SNAKE_CASE , "set_processor"):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
module.set_processor(__SCREAMING_SNAKE_CASE)
else:
module.set_processor(processor.pop(F'''{name}.processor'''))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for name, module in self.named_children():
fn_recursive_attn_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
self.set_attn_processor(AttnProcessor())
@apply_forward_hook
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE)
if self.use_slicing and x.shape[0] > 1:
UpperCamelCase__ : List[str] =[self.encoder(__SCREAMING_SNAKE_CASE) for x_slice in x.split(1)]
UpperCamelCase__ : Optional[int] =torch.cat(__SCREAMING_SNAKE_CASE)
else:
UpperCamelCase__ : List[Any] =self.encoder(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Any =self.quant_conv(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Union[str, Any] =DiagonalGaussianDistribution(__SCREAMING_SNAKE_CASE)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : str =self.post_quant_conv(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Optional[int] =self.decoder(__SCREAMING_SNAKE_CASE)
if not return_dict:
return (dec,)
return DecoderOutput(sample=__SCREAMING_SNAKE_CASE)
@apply_forward_hook
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
UpperCamelCase__ : List[Any] =[self._decode(__SCREAMING_SNAKE_CASE).sample for z_slice in z.split(1)]
UpperCamelCase__ : Any =torch.cat(__SCREAMING_SNAKE_CASE)
else:
UpperCamelCase__ : Any =self._decode(__SCREAMING_SNAKE_CASE).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] =min(a.shape[2] , b.shape[2] , __SCREAMING_SNAKE_CASE)
for y in range(__SCREAMING_SNAKE_CASE):
UpperCamelCase__ : Optional[int] =a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Dict =min(a.shape[3] , b.shape[3] , __SCREAMING_SNAKE_CASE)
for x in range(__SCREAMING_SNAKE_CASE):
UpperCamelCase__ : Optional[int] =a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True) -> AutoencoderKLOutput:
"""simple docstring"""
UpperCamelCase__ : List[str] =int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
UpperCamelCase__ : Optional[int] =int(self.tile_latent_min_size * self.tile_overlap_factor)
UpperCamelCase__ : Optional[int] =self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCamelCase__ : int =[]
for i in range(0 , x.shape[2] , __SCREAMING_SNAKE_CASE):
UpperCamelCase__ : List[Any] =[]
for j in range(0 , x.shape[3] , __SCREAMING_SNAKE_CASE):
UpperCamelCase__ : Dict =x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCamelCase__ : Optional[int] =self.encoder(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : str =self.quant_conv(__SCREAMING_SNAKE_CASE)
row.append(__SCREAMING_SNAKE_CASE)
rows.append(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Optional[Any] =[]
for i, row in enumerate(__SCREAMING_SNAKE_CASE):
UpperCamelCase__ : Union[str, Any] =[]
for j, tile in enumerate(__SCREAMING_SNAKE_CASE):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCamelCase__ : Tuple =self.blend_v(rows[i - 1][j] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if j > 0:
UpperCamelCase__ : List[Any] =self.blend_h(row[j - 1] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=3))
UpperCamelCase__ : Optional[Any] =torch.cat(__SCREAMING_SNAKE_CASE , dim=2)
UpperCamelCase__ : Optional[Any] =DiagonalGaussianDistribution(__SCREAMING_SNAKE_CASE)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] =int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
UpperCamelCase__ : Dict =int(self.tile_sample_min_size * self.tile_overlap_factor)
UpperCamelCase__ : Any =self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCamelCase__ : Union[str, Any] =[]
for i in range(0 , z.shape[2] , __SCREAMING_SNAKE_CASE):
UpperCamelCase__ : Tuple =[]
for j in range(0 , z.shape[3] , __SCREAMING_SNAKE_CASE):
UpperCamelCase__ : Optional[Any] =z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCamelCase__ : Optional[int] =self.post_quant_conv(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : int =self.decoder(__SCREAMING_SNAKE_CASE)
row.append(__SCREAMING_SNAKE_CASE)
rows.append(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Any =[]
for i, row in enumerate(__SCREAMING_SNAKE_CASE):
UpperCamelCase__ : int =[]
for j, tile in enumerate(__SCREAMING_SNAKE_CASE):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCamelCase__ : Tuple =self.blend_v(rows[i - 1][j] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if j > 0:
UpperCamelCase__ : List[Any] =self.blend_h(row[j - 1] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=3))
UpperCamelCase__ : Any =torch.cat(__SCREAMING_SNAKE_CASE , dim=2)
if not return_dict:
return (dec,)
return DecoderOutput(sample=__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
UpperCamelCase__ : int =sample
UpperCamelCase__ : Dict =self.encode(__SCREAMING_SNAKE_CASE).latent_dist
if sample_posterior:
UpperCamelCase__ : Dict =posterior.sample(generator=__SCREAMING_SNAKE_CASE)
else:
UpperCamelCase__ : Tuple =posterior.mode()
UpperCamelCase__ : str =self.decode(__SCREAMING_SNAKE_CASE).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__SCREAMING_SNAKE_CASE)
| 704 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _lowerCamelCase ( A_ : Tuple , A_ : str ) -> List[Any]:
'''simple docstring'''
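    # shared assertions for the 4-row / 3-column parquet fixture used throughout these tests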
assert isinstance(A_ , A_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _lowerCamelCase ( A_ : List[Any] , A_ : str , A_ : List[str] ) -> int:
'''simple docstring'''
UpperCamelCase__ : Any =tmp_path / "cache"
UpperCamelCase__ : int ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ : Any =ParquetDatasetReader(A_ , cache_dir=A_ , keep_in_memory=A_ ).read()
_check_parquet_dataset(A_ , A_ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _lowerCamelCase ( A_ : List[Any] , A_ : Optional[int] , A_ : Optional[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ : Dict =tmp_path / "cache"
UpperCamelCase__ : List[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase__ : int =features.copy() if features else default_expected_features
UpperCamelCase__ : List[str] =(
Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ : Any =ParquetDatasetReader(A_ , features=A_ , cache_dir=A_ ).read()
_check_parquet_dataset(A_ , A_ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _lowerCamelCase ( A_ : Optional[Any] , A_ : Tuple , A_ : Optional[Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] =tmp_path / "cache"
UpperCamelCase__ : Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase__ : List[Any] =ParquetDatasetReader(A_ , cache_dir=A_ , split=A_ ).read()
_check_parquet_dataset(A_ , A_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _lowerCamelCase ( A_ : Any , A_ : int , A_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
if issubclass(A_ , A_ ):
UpperCamelCase__ : Optional[int] =parquet_path
elif issubclass(A_ , A_ ):
UpperCamelCase__ : List[Any] =[parquet_path]
UpperCamelCase__ : Optional[Any] =tmp_path / "cache"
UpperCamelCase__ : Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase__ : Dict =ParquetDatasetReader(A_ , cache_dir=A_ ).read()
_check_parquet_dataset(A_ , A_ )
def _lowerCamelCase ( A_ : str , A_ : List[str] , A_ : int=("train",) ) -> List[Any]:
'''simple docstring'''
assert isinstance(A_ , A_ )
for split in splits:
UpperCamelCase__ : Dict =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _lowerCamelCase ( A_ : List[str] , A_ : Tuple , A_ : List[str] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Tuple =tmp_path / "cache"
UpperCamelCase__ : str ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ : Optional[Any] =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=A_ , keep_in_memory=A_ ).read()
_check_parquet_datasetdict(A_ , A_ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _lowerCamelCase ( A_ : List[str] , A_ : Dict , A_ : List[str] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Dict =tmp_path / "cache"
UpperCamelCase__ : str ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase__ : Any =features.copy() if features else default_expected_features
UpperCamelCase__ : int =(
Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ : str =ParquetDatasetReader({"train": parquet_path} , features=A_ , cache_dir=A_ ).read()
_check_parquet_datasetdict(A_ , A_ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _lowerCamelCase ( A_ : Any , A_ : Dict , A_ : Tuple ) -> Any:
'''simple docstring'''
if split:
UpperCamelCase__ : str ={split: parquet_path}
else:
UpperCamelCase__ : Optional[Any] ="train"
UpperCamelCase__ : Optional[int] ={"train": parquet_path, "test": parquet_path}
UpperCamelCase__ : Any =tmp_path / "cache"
UpperCamelCase__ : List[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase__ : str =ParquetDatasetReader(A_ , cache_dir=A_ ).read()
_check_parquet_datasetdict(A_ , A_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _lowerCamelCase ( A_ : Optional[int] , A_ : str ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : List[Any] =ParquetDatasetWriter(A_ , tmp_path / "foo.parquet" )
assert writer.write() > 0
UpperCamelCase__ : str =pq.ParquetFile(tmp_path / "foo.parquet" )
UpperCamelCase__ : Optional[int] =pf.read()
assert dataset.data.table == output_table
def _lowerCamelCase ( A_ : Optional[Any] , A_ : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[str] =str(shared_datadir / "test_image_rgb.jpg" )
UpperCamelCase__ : Dict ={"image": [image_path]}
UpperCamelCase__ : Union[str, Any] =Features({"image": Image()} )
UpperCamelCase__ : Dict =Dataset.from_dict(A_ , features=A_ )
UpperCamelCase__ : Any =ParquetDatasetWriter(A_ , tmp_path / "foo.parquet" )
assert writer.write() > 0
UpperCamelCase__ : Any =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
UpperCamelCase__ : Dict =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=A_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def _lowerCamelCase ( A_ : Any , A_ : List[Any] ) -> Dict:
'''simple docstring'''
assert get_writer_batch_size(A_ ) == expected
| 582 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCAmelCase__ : Union[str, Any] = TypeVar('T')
UpperCAmelCase__ : List[Any] = TypeVar('U')
class lowerCAmelCase_ (Generic[T, U] ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = key
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] | None = None
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] | None = None
def __repr__(self ) -> str:
"""simple docstring"""
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class lowerCAmelCase_ (Generic[T, U] ):
"""simple docstring"""
def __init__(self ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        self.head.next , self.rear.prev = self.rear, self.head
def __repr__(self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = ["""DoubleLinkedList"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.head
while node.next is not None:
rep.append(str(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
SCREAMING_SNAKE_CASE__ : int = node
SCREAMING_SNAKE_CASE__ : Optional[Any] = previous
SCREAMING_SNAKE_CASE__ : List[str] = node
SCREAMING_SNAKE_CASE__ : List[Any] = self.rear
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> DoubleLinkedListNode[T, U] | None:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
SCREAMING_SNAKE_CASE__ : Optional[Any] = node.next
SCREAMING_SNAKE_CASE__ : Optional[int] = node.prev
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : List[str] = None
return node
class lowerCAmelCase_ (Generic[T, U] ):
"""simple docstring"""
__UpperCamelCase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__(self , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : DoubleLinkedList[T, U] = DoubleLinkedList()
SCREAMING_SNAKE_CASE__ : List[Any] = capacity
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE__ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__(self ) -> str:
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__(self , SCREAMING_SNAKE_CASE__ ) -> bool:
"""simple docstring"""
return key in self.cache
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> U | None:
"""simple docstring"""
if key in self.cache:
self.hits += 1
SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] = self.cache[key]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(SCREAMING_SNAKE_CASE__ )
return node.val
self.miss += 1
return None
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
SCREAMING_SNAKE_CASE__ : Any = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(SCREAMING_SNAKE_CASE__ ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
SCREAMING_SNAKE_CASE__ : List[str] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
SCREAMING_SNAKE_CASE__ : Optional[int] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
self.list.add(SCREAMING_SNAKE_CASE__ )
@classmethod
def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ = 1_28 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(SCREAMING_SNAKE_CASE__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*SCREAMING_SNAKE_CASE__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
SCREAMING_SNAKE_CASE__ : List[str] = LRUCache(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
SCREAMING_SNAKE_CASE__ : Tuple = func(*SCREAMING_SNAKE_CASE__ )
cls.decorator_function_to_instance_map[func].put(args[0] , SCREAMING_SNAKE_CASE__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(SCREAMING_SNAKE_CASE__ , """cache_info""" , SCREAMING_SNAKE_CASE__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 223 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class lowerCAmelCase_ (pl.LightningModule ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Any] = model
SCREAMING_SNAKE_CASE__ : str = 2
SCREAMING_SNAKE_CASE__ : Any = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
pass
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
# load longformer model from model identifier
SCREAMING_SNAKE_CASE__ : Optional[Any] = LongformerModel.from_pretrained(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = LightningModel(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.load(_snake_case ,map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
SCREAMING_SNAKE_CASE__ : int = LongformerForQuestionAnswering.from_pretrained(_snake_case )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_snake_case )
print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
UpperCAmelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
        help='Path to the official PyTorch Lightning checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCAmelCase__ : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 223 | 1 |
'''simple docstring'''
lowerCamelCase__ = tuple[float, float, float]
lowerCamelCase__ = tuple[float, float, float]
def _SCREAMING_SNAKE_CASE( end_pointa : Pointad , end_pointb : Pointad ) ->Vectorad:
    '''simple docstring'''
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def _SCREAMING_SNAKE_CASE( ab : Vectorad , ac : Vectorad ) ->Vectorad:
    '''simple docstring'''
    x = ab[1] * ac[2] - ab[2] * ac[1] # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
    z = ab[0] * ac[1] - ab[1] * ac[0] # *k
    return (x, y, z)
def _SCREAMING_SNAKE_CASE( vector : Vectorad , accuracy : int ) ->bool:
    '''simple docstring'''
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
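# three points are collinear iff the cross product of vectors AB and AC is the zero vector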
def _SCREAMING_SNAKE_CASE( point_a : Pointad , point_b : Pointad , point_c : Pointad , accuracy : int = 10 ) ->bool:
    '''simple docstring'''
    ab = create_vector(point_a , point_b )
    ac = create_vector(point_a , point_c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
| 720 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
lowerCamelCase__ = 'naver-clova-ix/donut-base'
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowercase ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = DonutProcessor.from_pretrained(UpperCamelCase_ )
def __lowercase ( self : Tuple ) -> Tuple:
'''simple docstring'''
_lowercase : str = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
_lowercase : List[str] = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
        _lowercase : str = self.processor.token2json(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , UpperCamelCase_ )
| 411 | 0 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ : int = StableDiffusionDiffEditPipeline
lowerCAmelCase_ : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
lowerCAmelCase_ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
lowerCAmelCase_ : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase_ : int = frozenset([] )
def SCREAMING_SNAKE_CASE_ ( self :str ):
torch.manual_seed(0 )
        _a = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
_a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
_a = DDIMInverseScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
_a = CLIPTextModel(UpperCamelCase__ )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , UpperCamelCase__ :List[Any] , UpperCamelCase__ :Dict=0 ):
_a = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
_a = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("mps" ):
_a = torch.manual_seed(UpperCamelCase__ )
else:
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
_a = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , UpperCamelCase__ :Any , UpperCamelCase__ :Tuple=0 ):
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _a = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert("RGB" )
if str(UpperCamelCase__ ).startswith("mps" ):
_a = torch.manual_seed(UpperCamelCase__ )
else:
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
_a = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self :Any , UpperCamelCase__ :Tuple , UpperCamelCase__ :Union[str, Any]=0 ):
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _a = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert("RGB" )
if str(UpperCamelCase__ ).startswith("mps" ):
_a = torch.manual_seed(UpperCamelCase__ )
else:
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
_a = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
if not hasattr(self.pipeline_class , "_optional_components" ):
return
_a = self.get_dummy_components()
_a = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_a = self.get_dummy_inputs(UpperCamelCase__ )
_a = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
_a = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
_a = self.get_dummy_inputs(UpperCamelCase__ )
_a = pipe_loaded(**UpperCamelCase__ )[0]
_a = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase__ , 1E-4 )
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = "cpu"
_a = self.get_dummy_components()
_a = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = self.get_dummy_mask_inputs(UpperCamelCase__ )
_a = pipe.generate_mask(**UpperCamelCase__ )
_a = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
_a = np.array([0] * 9 )
_a = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
_a = "cpu"
_a = self.get_dummy_components()
_a = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = self.get_dummy_inversion_inputs(UpperCamelCase__ )
_a = pipe.invert(**UpperCamelCase__ ).images
_a = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_a = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
_a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1E-3 )
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def SCREAMING_SNAKE_CASE_ ( self :Any ):
_a = "cpu"
_a = self.get_dummy_components()
_a = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
_a = DPMSolverMultistepScheduler(**UpperCamelCase__ )
_a = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ )
_a = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = self.get_dummy_inversion_inputs(UpperCamelCase__ )
_a = pipe.invert(**UpperCamelCase__ ).images
_a = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_a = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
_a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1E-3 )
@require_torch_gpu
@slow
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls :Dict ):
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
_a = raw_image.convert("RGB" ).resize((768, 768) )
_a = raw_image
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
_a = torch.manual_seed(0 )
_a = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
_a = DDIMScheduler.from_config(pipe.scheduler.config )
_a = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "a bowl of fruit"
_a = "a bowl of pears"
_a = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
_a = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents
_a = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
_a = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = torch.manual_seed(0 )
_a = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
_a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_a = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "a bowl of fruit"
_a = "a bowl of pears"
_a = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
_a = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=25 , ).latents
_a = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
_a = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 388 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class __snake_case ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self :Optional[int] , **UpperCamelCase__ :List[str] ):
super().__init__(**UpperCamelCase__ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self :Tuple , UpperCamelCase__ :Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase__ :Dict ):
return super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , **UpperCamelCase__ :List[str] ):
_a = {}
if "candidate_labels" in kwargs:
_a = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
_a = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def SCREAMING_SNAKE_CASE_ ( self :Any , UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :str=None , UpperCamelCase__ :Dict="This is a photo of {}." ):
_a = load_image(UpperCamelCase__ )
_a = self.image_processor(images=[image] , return_tensors=self.framework )
_a = candidate_labels
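        # expand each candidate label into a full hypothesis sentence before text encoding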
_a = [hypothesis_template.format(UpperCamelCase__ ) for x in candidate_labels]
_a = self.tokenizer(UpperCamelCase__ , return_tensors=self.framework , padding=UpperCamelCase__ )
_a = [text_inputs]
return inputs
def SCREAMING_SNAKE_CASE_ ( self :int , UpperCamelCase__ :List[str] ):
_a = model_inputs.pop("candidate_labels" )
_a = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , UpperCamelCase__ ):
_a = text_inputs[0]
else:
# Batching case.
_a = text_inputs[0][0]
_a = self.model(**UpperCamelCase__ , **UpperCamelCase__ )
_a = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , UpperCamelCase__ :int ):
_a = model_outputs.pop("candidate_labels" )
_a = model_outputs["logits"][0]
if self.framework == "pt":
_a = logits.softmax(dim=-1 ).squeeze(-1 )
_a = probs.tolist()
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_a = [scores]
elif self.framework == "tf":
_a = stable_softmax(UpperCamelCase__ , axis=-1 )
_a = probs.numpy().tolist()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
_a = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(UpperCamelCase__ , UpperCamelCase__ ) , key=lambda x : -x[0] )
]
return result
| 388 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__a: Dict = ['''gpt2''']
__a: Any = '''gpt2'''
if is_tf_available():
class SCREAMING_SNAKE_CASE__ ( tf.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowerCamelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = tokenizer
_UpperCAmelCase = AutoConfig.from_pretrained(lowerCamelCase )
_UpperCAmelCase = TFGPTaLMHeadModel.from_config(lowerCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) )
def lowerCamelCase ( self : Optional[int] , lowerCamelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer(lowerCamelCase )
_UpperCAmelCase = tokenized["""input_ids"""].to_tensor()
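        # densify the ragged token ids; the attention mask marks the non-zero (non-padding) positions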
        _UpperCAmelCase = tf.cast(input_ids_dense > 0 , tf.int32 )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_UpperCAmelCase = self.model(input_ids=lowerCamelCase , attention_mask=lowerCamelCase )["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
super().setUp()
_UpperCAmelCase = [GPTaTokenizer.from_pretrained(lowerCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_UpperCAmelCase = [TFGPTaTokenizer.from_pretrained(lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_UpperCAmelCase = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
_UpperCAmelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowerCamelCase ( self : str ) -> str:
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_UpperCAmelCase = tokenizer([test_inputs] , return_tensors="""tf""" )
_UpperCAmelCase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_UpperCAmelCase = python_outputs[key].numpy()
_UpperCAmelCase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )
@slow
def lowerCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_UpperCAmelCase = tf.function(lowerCamelCase )
for test_inputs in self.test_sentences:
_UpperCAmelCase = tf.constant(lowerCamelCase )
_UpperCAmelCase = compiled_tokenizer(lowerCamelCase )
_UpperCAmelCase = tf_tokenizer(lowerCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowerCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_UpperCAmelCase = ModelToSave(tokenizer=lowerCamelCase )
_UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
_UpperCAmelCase = model.serving(lowerCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_UpperCAmelCase = Path(lowerCamelCase ) / """saved.model"""
tf.saved_model.save(lowerCamelCase , lowerCamelCase , signatures={"""serving_default""": model.serving} )
_UpperCAmelCase = tf.saved_model.load(lowerCamelCase )
_UpperCAmelCase = loaded_model.signatures["""serving_default"""](lowerCamelCase )["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowerCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
_UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
_UpperCAmelCase = tf_tokenizer(lowerCamelCase ) # Build model with some sample inputs
_UpperCAmelCase = tf_tokenizer.get_config()
_UpperCAmelCase = TFGPTaTokenizer.from_config(lowerCamelCase )
_UpperCAmelCase = model_from_config(lowerCamelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowerCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_UpperCAmelCase = 12_3123
for max_length in [3, 5, 1024]:
_UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
_UpperCAmelCase = tf_tokenizer(lowerCamelCase , max_length=lowerCamelCase )
_UpperCAmelCase = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
| 402 |
__a: int = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__a: List[str] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> list[int]:
_UpperCAmelCase = True
_UpperCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__snake_case , __snake_case , __snake_case )
order.append(__snake_case )
return order
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> list[int]:
_UpperCAmelCase = True
_UpperCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__snake_case , __snake_case , __snake_case )
return component
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> list[list[int]]:
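    # Kosaraju's algorithm: order vertices by DFS finish time, then gather components on the reversed graph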
_UpperCAmelCase = len(__snake_case ) * [False]
_UpperCAmelCase = {vert: [] for vert in range(len(__snake_case ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__snake_case )
_UpperCAmelCase = []
for i, was_visited in enumerate(__snake_case ):
if not was_visited:
order += topology_sort(__snake_case , __snake_case , __snake_case )
_UpperCAmelCase = []
_UpperCAmelCase = len(__snake_case ) * [False]
for i in range(len(__snake_case ) ):
_UpperCAmelCase = order[len(__snake_case ) - i - 1]
if not visited[vert]:
_UpperCAmelCase = find_components(__snake_case , __snake_case , __snake_case )
components_list.append(__snake_case )
    return components_list
| 402 | 1 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase_ = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCamelCase_ :
__magic_name__ = PegasusConfig
__magic_name__ = {}
__magic_name__ = '''gelu'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=13 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : List[Any]=32 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Any=37 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Any=20 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : Dict=0 , ) -> Optional[Any]:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Tuple = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : int = eos_token_id
UpperCAmelCase_ : Dict = pad_token_id
UpperCAmelCase_ : Optional[Any] = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
UpperCAmelCase_ : int = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ : Any = np.concatenate([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Dict = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase_ : List[Any] = prepare_pegasus_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = 20
UpperCAmelCase_ : Tuple = model_class_name(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
UpperCAmelCase_ : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_ )
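        # with the cache pre-allocated, decode all tokens but the last, then the final token incrementally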
UpperCAmelCase_ : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
UpperCAmelCase_ : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
UpperCAmelCase_ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase_ : str = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
UpperCAmelCase_ : Optional[int] = model.decode(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ) -> Any:
UpperCAmelCase_ : Dict = 20
UpperCAmelCase_ : int = model_class_name(lowerCAmelCase_ )
UpperCAmelCase_ : int = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
UpperCAmelCase_ : Dict = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase_ : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : str = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
UpperCAmelCase_ : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase_ : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
UpperCAmelCase_ : str = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ )
UpperCAmelCase_ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
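# prepare_pegasus_inputs_dict derives the masks from the pad token id: every
# non-pad position attends, and the decoder mask always keeps the first position
# (the forced decoder start token) visible.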
def prepare_pegasus_inputs_dict(config ,input_ids ,decoder_input_ids ,attention_mask=None ,decoder_attention_mask=None ,):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids ,config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape ,dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.int8 ),
            ] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCamelCase_ (__A , unittest.TestCase ):
    all_model_classes = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
UpperCAmelCase_ : List[str] = FlaxPegasusModelTester(self )
UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : Dict = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = model_class(lowerCAmelCase_ )
@jax.jit
def encode_jitted(lowerCAmelCase_ : Any , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : Any ):
return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ : Dict = encode_jitted(**lowerCAmelCase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ : Optional[int] = encode_jitted(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : str = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
UpperCAmelCase_ : Optional[int] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple ):
return model.decode(
decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ : str = decode_jitted(**lowerCAmelCase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ : Tuple = decode_jitted(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : Optional[int] = model_class_name.from_pretrained("google/pegasus-large" , from_pt=lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = np.ones((1, 1) )
UpperCAmelCase_ : Any = model(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
UpperCAmelCase_ : Dict = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
UpperCAmelCase_ : List[str] = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
UpperCAmelCase_ : Any = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
UpperCAmelCase_ : Union[str, Any] = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
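        # Tokenize, generate with a small beam, and compare against the reference summaries.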
UpperCAmelCase_ : Dict = tokenizer(lowerCAmelCase_ , return_tensors="np" , truncation=lowerCAmelCase_ , max_length=512 , padding=lowerCAmelCase_ )
UpperCAmelCase_ : Any = model.generate(**lowerCAmelCase_ , num_beams=2 ).sequences
UpperCAmelCase_ : int = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
assert tgt_text == decoded
| 95 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            pass

    def load_image(_ ):
        return None
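# These no-op stubs only keep the module importable when Pillow is missing; the
# tests below are gated by @require_vision / @require_pytesseract anyway.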
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : int = pipeline(
"document-question-answering" , model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
UpperCAmelCase_ : int = INVOICE_URL
UpperCAmelCase_ : Union[str, Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase_ ) , lowerCAmelCase_ , "" ) ) )
UpperCAmelCase_ : Optional[Any] = "What is the placebo?"
UpperCAmelCase_ : Tuple = [
{
"image": load_image(lowerCAmelCase_ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
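    # run_pipeline_test only checks the output schema (score/answer/start/end for each
    # of the top_k candidates), not concrete values, since the model may be randomly initialized.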
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = dqa_pipeline(lowerCAmelCase_ , top_k=2 )
self.assertEqual(
lowerCAmelCase_ , [
[
{"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ ), "start": ANY(lowerCAmelCase_ ), "end": ANY(lowerCAmelCase_ )},
{"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ ), "start": ANY(lowerCAmelCase_ ), "end": ANY(lowerCAmelCase_ )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : Tuple = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
UpperCAmelCase_ : Dict = INVOICE_URL
UpperCAmelCase_ : int = "How many cats are there?"
UpperCAmelCase_ : Any = [
{"score": 0.0_0_0_1, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_0_0_1, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
UpperCAmelCase_ : List[str] = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4 ) , lowerCAmelCase_ )
UpperCAmelCase_ : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4 ) , lowerCAmelCase_ )
        # This image contains no detectable text, so layoutlmv2 should return an empty answer.
UpperCAmelCase_ : int = "./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ : Dict = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(lowerCAmelCase_ , [] )
        # We can optionally pass the words and bounding boxes directly
UpperCAmelCase_ : int = "./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Optional[Any] = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , words=lowerCAmelCase_ , boxes=lowerCAmelCase_ , top_k=2 )
self.assertEqual(lowerCAmelCase_ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
UpperCAmelCase_ : Dict = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
UpperCAmelCase_ : Optional[Any] = INVOICE_URL
UpperCAmelCase_ : Dict = "What is the invoice number?"
UpperCAmelCase_ : int = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : int = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : Any = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
UpperCAmelCase_ : Tuple = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
UpperCAmelCase_ : Tuple = INVOICE_URL
UpperCAmelCase_ : Any = "What is the invoice number?"
UpperCAmelCase_ : str = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : Optional[int] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : str = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase_ )
UpperCAmelCase_ : str = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase_ , revision="3dc6de3" , )
UpperCAmelCase_ : Any = INVOICE_URL
UpperCAmelCase_ : List[str] = "What is the invoice number?"
UpperCAmelCase_ : str = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
UpperCAmelCase_ : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
UpperCAmelCase_ : Union[str, Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
UpperCAmelCase_ : Dict = list(zip(*apply_tesseract(load_image(lowerCAmelCase_ ) , lowerCAmelCase_ , "" ) ) )
# This model should also work if `image` is set to None
UpperCAmelCase_ : List[str] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase_ )
UpperCAmelCase_ : str = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase_ , revision="3dc6de3" , max_seq_len=50 , )
UpperCAmelCase_ : List[Any] = INVOICE_URL
UpperCAmelCase_ : Optional[int] = "What is the invoice number?"
UpperCAmelCase_ : int = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
UpperCAmelCase_ : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase_ ) , lowerCAmelCase_ , "" ) ) )
# This model should also work if `image` is set to None
UpperCAmelCase_ : Dict = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : List[Any] = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
UpperCAmelCase_ : Optional[int] = INVOICE_URL
UpperCAmelCase_ : int = "What is the invoice number?"
UpperCAmelCase_ : List[str] = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
pass
| 95 | 1 |
'''simple docstring'''
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Color vertex v with c, then give every unvisited neighbour the other color.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color each connected component, starting every component with color 0.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    # The graph is bipartite iff no edge joins two vertices of the same color.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
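# For the 4-cycle above (vertex 4 isolated) this prints True.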
| 717 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
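# Fetch num_class_images regularization images matching class_prompt from the
# LAION-400M kNN index, over-fetching to tolerate dead links, and record the
# captions, urls, and local paths alongside the downloads.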
def retrieve(class_prompt, class_data_dir, num_class_images) -> None:
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Keep widening the query until enough candidates are returned (or a hard cap is hit).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate that the payload is a decodable image before saving it.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 30 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "markuplm"
def __init__( self :Optional[Any], snake_case :List[Any]=3_0522, snake_case :Optional[int]=768, snake_case :Union[str, Any]=12, snake_case :int=12, snake_case :Optional[int]=3072, snake_case :int="gelu", snake_case :List[str]=0.1, snake_case :Dict=0.1, snake_case :int=512, snake_case :List[Any]=2, snake_case :List[Any]=0.0_2, snake_case :Any=1e-1_2, snake_case :Optional[int]=0, snake_case :Optional[int]=0, snake_case :List[Any]=2, snake_case :Union[str, Any]=256, snake_case :Optional[int]=1024, snake_case :str=216, snake_case :Tuple=1001, snake_case :Optional[Any]=32, snake_case :Union[str, Any]=50, snake_case :int="absolute", snake_case :Dict=True, snake_case :List[Any]=None, **snake_case :Optional[int], ):
"""simple docstring"""
super().__init__(
pad_token_id=snake_case, bos_token_id=snake_case, eos_token_id=snake_case, **snake_case, )
_lowercase =vocab_size
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =hidden_act
_lowercase =intermediate_size
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =max_position_embeddings
_lowercase =type_vocab_size
_lowercase =initializer_range
_lowercase =layer_norm_eps
_lowercase =position_embedding_type
_lowercase =use_cache
_lowercase =classifier_dropout
# additional properties
_lowercase =max_depth
_lowercase =max_xpath_tag_unit_embeddings
_lowercase =max_xpath_subs_unit_embeddings
_lowercase =tag_pad_id
_lowercase =subs_pad_id
_lowercase =xpath_unit_hidden_size
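        # The fields above configure MarkupLM's XPath embeddings: a DOM path of at most
        # max_depth steps, where each step embeds a tag id and a subscript (sibling index).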
| 181 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    """simple docstring"""

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
def __init__( self :Dict, snake_case :List[Any], **snake_case :Tuple):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
_lowercase =False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
super().__init__(snake_case, num_labels=snake_case, mode=self.mode, **snake_case)
use_task_specific_params(self.model, 'summarization')
save_git_info(self.hparams.output_dir)
_lowercase =Path(self.output_dir) / 'metrics.json'
_lowercase =Path(self.output_dir) / 'hparams.pkl'
pickle_save(self.hparams, self.hparams_save_path)
_lowercase =0
_lowercase =defaultdict(snake_case)
_lowercase =self.config.model_type
_lowercase =self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
_lowercase ={
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_lowercase ={
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
_lowercase ={k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_lowercase ={
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model)
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
_lowercase =get_git_info()['repo_sha']
_lowercase =hparams.num_workers
_lowercase =None # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
_lowercase =self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_lowercase =self.decoder_start_token_id
_lowercase =(
SeqaSeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeqaSeqDataset
)
_lowercase =False
_lowercase =self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_lowercase =self.hparams.eval_max_gen_length
else:
_lowercase =self.model.config.max_length
_lowercase =self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCamelCase__ ( self :str, snake_case :Dict[str, torch.Tensor]):
"""simple docstring"""
_lowercase ={
k: self.tokenizer.batch_decode(v.tolist()) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(snake_case, Path(self.output_dir) / 'text_batch.json')
save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / 'tok_batch.json')
_lowercase =True
return readable_batch
def UpperCamelCase__ ( self :Dict, snake_case :List[str], **snake_case :List[Any]):
"""simple docstring"""
return self.model(snake_case, **snake_case)
def UpperCamelCase__ ( self :Any, snake_case :List[int]):
"""simple docstring"""
_lowercase =self.tokenizer.batch_decode(
snake_case, skip_special_tokens=snake_case, clean_up_tokenization_spaces=snake_case)
return lmap(str.strip, snake_case)
def UpperCamelCase__ ( self :Union[str, Any], snake_case :dict):
"""simple docstring"""
_lowercase =self.tokenizer.pad_token_id
_lowercase , _lowercase =batch['input_ids'], batch['attention_mask']
_lowercase =batch['labels']
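        # Teacher forcing: the decoder consumes the labels shifted one position to the right.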
        if isinstance(self.model, T5ForConditionalGeneration):
_lowercase =self.model._shift_right(snake_case)
else:
_lowercase =shift_tokens_right(snake_case, snake_case)
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_lowercase =decoder_input_ids
self.save_readable_batch(snake_case)
_lowercase =self(snake_case, attention_mask=snake_case, decoder_input_ids=snake_case, use_cache=snake_case)
_lowercase =outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_lowercase =nn.CrossEntropyLoss(ignore_index=snake_case)
assert lm_logits.shape[-1] == self.vocab_size
_lowercase =ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
else:
_lowercase =nn.functional.log_softmax(snake_case, dim=-1)
_lowercase , _lowercase =label_smoothed_nll_loss(
snake_case, snake_case, self.hparams.label_smoothing, ignore_index=snake_case)
return (loss,)
@property
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
return self.tokenizer.pad_token_id
def UpperCamelCase__ ( self :Tuple, snake_case :Dict, snake_case :str):
"""simple docstring"""
_lowercase =self._step(snake_case)
_lowercase =dict(zip(self.loss_names, snake_case))
# tokens per batch
_lowercase =batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum()
_lowercase =batch['input_ids'].shape[0]
_lowercase =batch['input_ids'].eq(self.pad).sum()
_lowercase =batch['input_ids'].eq(self.pad).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCamelCase__ ( self :List[Any], snake_case :Dict, snake_case :List[Any]):
"""simple docstring"""
return self._generative_step(snake_case)
def UpperCamelCase__ ( self :List[Any], snake_case :List[str], snake_case :str="val"):
"""simple docstring"""
self.step_count += 1
_lowercase ={k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
_lowercase =losses['loss']
_lowercase ={
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
_lowercase =(
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_lowercase =torch.tensor(snake_case).type_as(snake_case)
generative_metrics.update({k: v.item() for k, v in losses.items()})
losses.update(snake_case)
_lowercase ={f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
_lowercase =self.step_count
self.metrics[prefix].append(snake_case) # callback writes this to self.metrics_save_path
_lowercase =flatten_list([x['preds'] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def UpperCamelCase__ ( self :Any, snake_case :str, snake_case :List[str]):
"""simple docstring"""
return calculate_rouge(snake_case, snake_case)
def UpperCamelCase__ ( self :str, snake_case :dict):
"""simple docstring"""
_lowercase =time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_lowercase =self.model.generate(
batch['input_ids'], attention_mask=batch['attention_mask'], use_cache=snake_case, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, )
_lowercase =(time.time() - ta) / batch['input_ids'].shape[0]
_lowercase =self.ids_to_clean_text(snake_case)
_lowercase =self.ids_to_clean_text(batch['labels'])
_lowercase =self._step(snake_case)
_lowercase =dict(zip(self.loss_names, snake_case))
_lowercase =self.calc_generative_metrics(snake_case, snake_case)
_lowercase =np.mean(lmap(snake_case, snake_case))
base_metrics.update(gen_time=snake_case, gen_len=snake_case, preds=snake_case, target=snake_case, **snake_case)
return base_metrics
def UpperCamelCase__ ( self :Optional[Any], snake_case :str, snake_case :Optional[int]):
"""simple docstring"""
return self._generative_step(snake_case)
def UpperCamelCase__ ( self :Union[str, Any], snake_case :List[str]):
"""simple docstring"""
return self.validation_epoch_end(snake_case, prefix='test')
def UpperCamelCase__ ( self :List[Any], snake_case :str):
"""simple docstring"""
_lowercase =self.n_obs[type_path]
_lowercase =self.target_lens[type_path]
_lowercase =self.dataset_class(
self.tokenizer, type_path=snake_case, n_obs=snake_case, max_target_length=snake_case, **self.dataset_kwargs, )
return dataset
def UpperCamelCase__ ( self :str, snake_case :str, snake_case :int, snake_case :bool = False):
"""simple docstring"""
_lowercase =self.get_dataset(snake_case)
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_lowercase =dataset.make_sortish_sampler(snake_case, distributed=self.hparams.gpus > 1)
return DataLoader(
snake_case, batch_size=snake_case, collate_fn=dataset.collate_fn, shuffle=snake_case, num_workers=self.num_workers, sampler=snake_case, )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_lowercase =dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1)
return DataLoader(
snake_case, batch_sampler=snake_case, collate_fn=dataset.collate_fn, num_workers=self.num_workers, )
else:
return DataLoader(
snake_case, batch_size=snake_case, collate_fn=dataset.collate_fn, shuffle=snake_case, num_workers=self.num_workers, sampler=snake_case, )
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
_lowercase =self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=snake_case)
return dataloader
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)
def UpperCamelCase__ ( self :int):
"""simple docstring"""
return self.get_dataloader('test', batch_size=self.hparams.eval_batch_size)
@staticmethod
def UpperCamelCase__ ( snake_case :Dict, snake_case :List[str]):
"""simple docstring"""
BaseTransformer.add_model_specific_args(snake_case, snake_case)
add_generic_args(snake_case, snake_case)
parser.add_argument(
'--max_source_length', default=1024, type=snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--max_target_length', default=56, type=snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--val_max_target_length', default=142, type=snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--test_max_target_length', default=142, type=snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument('--freeze_encoder', action='store_true')
parser.add_argument('--freeze_embeds', action='store_true')
parser.add_argument('--sortish_sampler', action='store_true', default=snake_case)
parser.add_argument('--overwrite_output_dir', action='store_true', default=snake_case)
parser.add_argument('--max_tokens_per_batch', type=snake_case, default=snake_case)
parser.add_argument('--logger_name', type=snake_case, choices=['default', 'wandb', 'wandb_shared'], default='default')
parser.add_argument('--n_train', type=snake_case, default=-1, required=snake_case, help='# examples. -1 means use all.')
parser.add_argument('--n_val', type=snake_case, default=500, required=snake_case, help='# examples. -1 means use all.')
parser.add_argument('--n_test', type=snake_case, default=-1, required=snake_case, help='# examples. -1 means use all.')
parser.add_argument(
'--task', type=snake_case, default='summarization', required=snake_case, help='# examples. -1 means use all.')
parser.add_argument('--label_smoothing', type=snake_case, default=0.0, required=snake_case)
parser.add_argument('--src_lang', type=snake_case, default='', required=snake_case)
parser.add_argument('--tgt_lang', type=snake_case, default='', required=snake_case)
parser.add_argument('--eval_beams', type=snake_case, default=snake_case, required=snake_case)
parser.add_argument(
'--val_metric', type=snake_case, default=snake_case, required=snake_case, choices=['bleu', 'rouge2', 'loss', None])
parser.add_argument('--eval_max_gen_length', type=snake_case, default=snake_case, help='never generate more than n tokens')
parser.add_argument('--save_top_k', type=snake_case, default=1, required=snake_case, help='How many checkpoints to save')
parser.add_argument(
'--early_stopping_patience', type=snake_case, default=-1, required=snake_case, help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
), )
return parser
class TranslationModule(SummarizationModule):
    """simple docstring"""

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"
def __init__( self :str, snake_case :Union[str, Any], **snake_case :Any):
"""simple docstring"""
super().__init__(snake_case, **snake_case)
_lowercase =hparams.src_lang
_lowercase =hparams.tgt_lang
def UpperCamelCase__ ( self :Dict, snake_case :Union[str, Any], snake_case :Any):
"""simple docstring"""
return calculate_bleu(snake_case, snake_case)
def _snake_case (_snake_case : Dict , _snake_case : int=None) -> SummarizationModule:
Path(args.output_dir).mkdir(exist_ok=_snake_case)
check_output_dir(_snake_case , expected_items=3)
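    # Pick the task-specific LightningModule unless a pre-built model was passed in.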
if model is None:
if "summarization" in args.task:
_lowercase =SummarizationModule(_snake_case)
else:
_lowercase =TranslationModule(_snake_case)
_lowercase =Path(args.data_dir).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir).startswith('/tmp')
or str(args.output_dir).startswith('/var')
):
_lowercase =True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
_lowercase =os.environ.get('WANDB_PROJECT' , _snake_case)
_lowercase =WandbLogger(name=model.output_dir.name , project=_snake_case)
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
_lowercase =WandbLogger(name=model.output_dir.name , project=f'''hf_{dataset}''')
if args.early_stopping_patience >= 0:
_lowercase =get_early_stopping_callback(model.val_metric , args.early_stopping_patience)
else:
_lowercase =False
_lowercase =args.val_metric == 'loss'
_lowercase =generic_train(
_snake_case , _snake_case , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _snake_case) , early_stopping_callback=_snake_case , logger=_snake_case , )
pickle_save(model.hparams , model.output_dir / 'hparams.pkl')
if not args.do_predict:
return model
_lowercase =''
_lowercase =sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt') , recursive=_snake_case))
if checkpoints:
_lowercase =checkpoints[-1]
_lowercase =checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams)
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE = pl.Trainer.add_argparse_args(parser)
_SCREAMING_SNAKE_CASE = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_SCREAMING_SNAKE_CASE = parser.parse_args()
main(args)
| 181 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCAmelCase__ ( TestCasePlus ):
'''simple docstring'''
@slow
@require_torch
def _snake_case ( self : int ) -> str:
_lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
_lowerCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
_lowerCamelCase = bertabert.config.encoder.vocab_size
_lowerCamelCase = tokenizer.sep_token_id
_lowerCamelCase = tokenizer.cls_token_id
_lowerCamelCase = 1_2_8
_lowerCamelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
_lowerCamelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
_lowerCamelCase = train_dataset.select(range(3_2 ) )
_lowerCamelCase = val_dataset.select(range(1_6 ) )
_lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(snake_case__ : Any ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_lowerCamelCase = tokenizer(batch['article'] , padding='max_length' , truncation=snake_case__ , max_length=5_1_2 )
_lowerCamelCase = tokenizer(batch['highlights'] , padding='max_length' , truncation=snake_case__ , max_length=1_2_8 )
_lowerCamelCase = inputs.input_ids
_lowerCamelCase = inputs.attention_mask
_lowerCamelCase = outputs.input_ids
_lowerCamelCase = outputs.input_ids.copy()
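            # Mask pad positions with -100 so the cross-entropy loss ignores them.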
_lowerCamelCase = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
_lowerCamelCase = outputs.attention_mask
assert all(len(snake_case__ ) == 5_1_2 for x in inputs.input_ids )
assert all(len(snake_case__ ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(snake_case__ : Union[str, Any] ):
_lowerCamelCase = pred.label_ids
_lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
_lowerCamelCase = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCamelCase = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(snake_case__ ) )] ) / len(snake_case__ )
return {"accuracy": accuracy}
# map train dataset
_lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=snake_case__ , batch_size=snake_case__ , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
_lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=snake_case__ , batch_size=snake_case__ , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
_lowerCamelCase = self.get_auto_remove_tmp_dir()
        _lowerCamelCase = Seq2SeqTrainingArguments(
output_dir=snake_case__ , per_device_train_batch_size=snake_case__ , per_device_eval_batch_size=snake_case__ , predict_with_generate=snake_case__ , evaluation_strategy='steps' , do_train=snake_case__ , do_eval=snake_case__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
        _lowerCamelCase = Seq2SeqTrainer(
model=snake_case__ , args=snake_case__ , compute_metrics=_compute_metrics , train_dataset=snake_case__ , eval_dataset=snake_case__ , tokenizer=snake_case__ , )
# start training
        trainer.train()
| 706 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    '''simple docstring'''

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self : int , snake_case__ : UNet2DModel , snake_case__ : ScoreSdeVeScheduler ) -> Dict:
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self : Any , snake_case__ : int = 1 , snake_case__ : int = 2_0_0_0 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , **snake_case__ : Tuple , ) -> Union[ImagePipelineOutput, Tuple]:
_lowerCamelCase = self.unet.config.sample_size
_lowerCamelCase = (batch_size, 3, img_size, img_size)
_lowerCamelCase = self.unet
_lowerCamelCase = randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
_lowerCamelCase = sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
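        # Predictor-corrector sampling: at each timestep a few Langevin corrector steps
        # refine the sample, then the reverse-SDE predictor step advances it.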
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_lowerCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
_lowerCamelCase = self.unet(snake_case__ , snake_case__ ).sample
_lowerCamelCase = self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
_lowerCamelCase = model(snake_case__ , snake_case__ ).sample
_lowerCamelCase = self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
_lowerCamelCase , _lowerCamelCase = output.prev_sample, output.prev_sample_mean
_lowerCamelCase = sample_mean.clamp(0 , 1 )
_lowerCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCamelCase = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
        return ImagePipelineOutput(images=snake_case__ )
| 234 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Dict = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
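    # attribute_map lets generic code read the standard names (hidden_size, num_hidden_layers, ...)
    # while the stored config keys keep the original GPT-style names (n_embd, n_layer, ...).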
def __init__( self : str , A_ : str=246_534 , A_ : Dict=256 , A_ : Optional[int]=1_280 , A_ : int=8_192 , A_ : List[Any]=48 , A_ : Optional[int]=16 , A_ : Union[str, Any]=0.1 , A_ : List[Any]=0.1 , A_ : str=1E-6 , A_ : str=0.02 , A_ : Optional[Any]=True , **A_ : Tuple , ) -> Tuple:
__snake_case = vocab_size
__snake_case = n_positions
__snake_case = n_embd
__snake_case = n_layer
__snake_case = n_head
__snake_case = dff
__snake_case = resid_pdrop
__snake_case = embd_pdrop
__snake_case = layer_norm_epsilon
__snake_case = initializer_range
__snake_case = use_cache
        super().__init__(**A_ )
| 564 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowercase : Union[str, Any] = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Tuple = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 564 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 527 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    '''simple docstring'''

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 527 | 1 |
'''simple docstring'''
def merge_sort(collection):
    def merge(left, right) -> list:
        # Lazily interleave the two sorted halves, always yielding the smaller head.
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase : str =input("Enter numbers separated by a comma:\n").strip()
_UpperCamelCase : Any =[int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 316 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : Dict ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316 | 1 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends

if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
__A = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
'''simple docstring'''
def __init__(self : List[Any] , **UpperCAmelCase_ : Optional[int]) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["bs4"])
super().__init__(**UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Tuple) ->List[str]:
'''simple docstring'''
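        # Walk from the node up through its parents, recording at each level the tag
        # name and the child's 1-based index among same-tag siblings (0 when unique).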
lowerCamelCase__: List[Any] =[]
lowerCamelCase__: Tuple =[]
lowerCamelCase__: Optional[Any] =element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
lowerCamelCase__: Tuple =parent.find_all(child.name , recursive=UpperCAmelCase_)
xpath_tags.append(child.name)
xpath_subscripts.append(
0 if 1 == len(UpperCAmelCase_) else next(i for i, s in enumerate(UpperCAmelCase_ , 1) if s is child))
lowerCamelCase__: str =parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : int) ->str:
'''simple docstring'''
lowerCamelCase__: Dict =BeautifulSoup(UpperCAmelCase_ , "html.parser")
lowerCamelCase__: Dict =[]
lowerCamelCase__: Dict =[]
lowerCamelCase__: Union[str, Any] =[]
for element in html_code.descendants:
            if type(UpperCAmelCase_) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
continue
lowerCamelCase__: Union[str, Any] =html.unescape(UpperCAmelCase_).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =self.xpath_soup(UpperCAmelCase_)
stringaxtag_seq.append(UpperCAmelCase_)
stringaxsubs_seq.append(UpperCAmelCase_)
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError("Number of doc strings and xtags does not correspond")
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError("Number of doc strings and xsubs does not correspond")
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple) ->Any:
'''simple docstring'''
lowerCamelCase__: Dict =""
for tagname, subs in zip(UpperCAmelCase_ , UpperCAmelCase_):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__(self : List[Any] , UpperCAmelCase_ : Optional[int]) ->BatchFeature:
'''simple docstring'''
lowerCamelCase__: Dict =False
# Check that strings has a valid type
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: int =True
elif isinstance(UpperCAmelCase_ , (list, tuple)):
if len(UpperCAmelCase_) == 0 or isinstance(html_strings[0] , UpperCAmelCase_):
lowerCamelCase__: Union[str, Any] =True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
F"""but is of type {type(UpperCAmelCase_)}.""")
lowerCamelCase__: Union[str, Any] =bool(isinstance(UpperCAmelCase_ , (list, tuple)) and (isinstance(html_strings[0] , UpperCAmelCase_)))
if not is_batched:
lowerCamelCase__: Dict =[html_strings]
# Get nodes + xpaths
lowerCamelCase__: Optional[int] =[]
lowerCamelCase__: Union[str, Any] =[]
for html_string in html_strings:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Dict =self.get_three_from_single(UpperCAmelCase_)
nodes.append(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[]
for node, tag_list, sub_list in zip(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: int =self.construct_xpath(UpperCAmelCase_ , UpperCAmelCase_)
xpath_strings.append(UpperCAmelCase_)
xpaths.append(UpperCAmelCase_)
# return as Dict
lowerCamelCase__: List[str] ={"nodes": nodes, "xpaths": xpaths}
lowerCamelCase__: List[str] =BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
return encoded_inputs
| 437 |
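A short, self-contained sketch of calling the extractor above on a single HTML string (assumes `beautifulsoup4` is installed; the markup is invented for illustration):

extractor = MarkupLMFeatureExtractor()

html_string = "<html><body><h1>Title</h1><p>Hello <b>world</b></p></body></html>"
encoding = extractor(html_string)

# Each text node is paired with the xpath of its enclosing tag.
print(encoding["nodes"])   # [['Title', 'Hello', 'world']]
print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p', '/html/body/p/b']]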
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the input into a batch of videos, i.e. a list of lists of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    """Video image processor: resize, center-crop, rescale (with optional offset) and normalize frames."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            # When offset is enabled, shift pixel values by half the scale factor before rescaling.
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 437 | 1 |
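A quick sketch of running the processor above on a dummy clip; the eight random frames are fabricated for illustration, and numpy is assumed to be installed:

import numpy as np

processor = VivitImageProcessor()

# Eight fake RGB frames in height-width-channel layout.
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
inputs = processor.preprocess(video, return_tensors="np")

# Batch of 1 video, 8 frames, channels-first, center-cropped to 224x224.
print(inputs["pixel_values"].shape)  # (1, 8, 3, 224, 224)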