code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1
"""Exact Match metric."""
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                # Strip every occurrence of the regex from both predictions and references.
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
"""Image processor class for LeViT."""
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
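
For orientation, a minimal sketch of driving this processor once exported. The class name `LevitImageProcessor` is the one restored above (the (256/224) shortest-edge rule matches LeViT preprocessing in transformers); treat the import path as an assumption.

import numpy as np
from PIL import Image
from transformers import LevitImageProcessor  # assumption: the class above is the one shipped in transformers

processor = LevitImageProcessor()  # defaults: shortest_edge=224, center crop to 224x224
image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)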
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary representation as a string.

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(2)
    '0b10'
    >>> decimal_to_binary(7)
    '0b111'
    >>> decimal_to_binary(35)
    '0b100011'
    >>> decimal_to_binary(-2)
    '-0b10'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        # Collect the bits most-significant first.
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
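
The function agrees with Python's built-in `bin()`, including the sign handling, which gives a quick spot-check beyond the doctests:

# Cross-check against the built-in for a few values, negatives included.
for n in (0, 2, 7, -7, 35):
    assert decimal_to_binary(n) == bin(n)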
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency-models-test", subfolder="test_unet")
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency-models-test", subfolder="test_unet_class_cond")
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)

        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
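
Outside the test harness, the same pipeline can be sampled directly. A minimal sketch built from the components the slow tests load, assuming a CUDA device and access to the `diffusers/consistency_models` checkpoint used above:

import torch
from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel

unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")

# One-step generation, mirroring test_consistency_model_cd_onestep.
image = pipe(num_inference_steps=1, generator=torch.manual_seed(0)).images[0]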
"""
Project Euler Problem 77: https://projecteuler.net/problem=77

Find the first value which can be written as the sum of primes in over
five thousand different ways.
"""
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# Sieve of Eratosthenes over the odd numbers below NUM_PRIMES.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of integers, each encoding one way to write number_to_partition
    as a sum of primes: a partition is represented by the product of its primes,
    which is unique by the fundamental theorem of arithmetic.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer that can be written as the sum of primes in over number_unique_partitions unique ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
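
The `ret.add(sub * prime)` trick encodes each multiset of primes by its product, so `len(partition(n))` counts the distinct prime partitions of `n`. A small check by hand: 7 can be written as 7, 2 + 5, or 2 + 2 + 3, and each sum maps to a distinct product.

assert partition(7) == {7, 2 * 5, 2 * 2 * 3}
assert len(partition(7)) == 3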
"""Fast tokenization classes for RoBERTa."""
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}


class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
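
A quick sanity check of the behavior the class enforces, using the standard public `roberta-base` checkpoint:

from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
print(tokenizer("Hello world")["input_ids"])  # word-piece ids wrapped in bos_token_id=0 and eos_token_id=2

# Pretokenized input trips the assert in _encode_plus unless add_prefix_space=True:
tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
print(tokenizer(["Hello", "world"], is_split_into_words=True)["input_ids"])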
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
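
Since `HfArgumentParser` reads `sys.argv`, the script can also be smoke-tested programmatically. A sketch with hypothetical paths (the model name is a real distilled BART checkpoint; the data and output directories are placeholders):

import sys

sys.argv = [
    "finetune_trainer.py",
    "--model_name_or_path", "sshleifer/distilbart-xsum-12-3",
    "--data_dir", "./xsum",      # placeholder: directory with train/val/test source and target files
    "--output_dir", "./output",  # placeholder
    "--do_train",
    "--n_train", "64",
]
main()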
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
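
For reference, the non-test img2img flow these cases exercise looks roughly like this. A sketch, assuming the public SDXL refiner checkpoint and a CUDA device:

import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")
init_image = load_image("input.png")  # placeholder path to any RGB image
image = pipe(prompt="A painting of a squirrel eating a burger", image=init_image, strength=0.75).images[0]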
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
lowerCamelCase__ = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
lowerCamelCase__ = {
"""ctrl""": 256,
}
lowerCamelCase__ = {
"""Pregnancy""": 16_8629,
"""Christianity""": 7675,
"""Explain""": 10_6423,
"""Fitness""": 6_3440,
"""Saving""": 6_3163,
"""Ask""": 2_7171,
"""Ass""": 9_5985,
"""Joke""": 16_3509,
"""Questions""": 4_5622,
"""Thoughts""": 4_9605,
"""Retail""": 5_2342,
"""Feminism""": 16_4338,
"""Writing""": 1_1992,
"""Atheism""": 19_2263,
"""Netflix""": 4_8616,
"""Computing""": 3_9639,
"""Opinion""": 4_3213,
"""Alone""": 4_4967,
"""Funny""": 5_8917,
"""Gaming""": 4_0358,
"""Human""": 4088,
"""India""": 1331,
"""Joker""": 7_7138,
"""Diet""": 3_6206,
"""Legal""": 1_1859,
"""Norman""": 4939,
"""Tip""": 7_2689,
"""Weight""": 5_2343,
"""Movies""": 4_6273,
"""Running""": 2_3425,
"""Science""": 2090,
"""Horror""": 3_7793,
"""Confession""": 6_0572,
"""Finance""": 1_2250,
"""Politics""": 1_6360,
"""Scary""": 19_1985,
"""Support""": 1_2654,
"""Technologies""": 3_2516,
"""Teenage""": 6_6160,
"""Event""": 3_2769,
"""Learned""": 6_7460,
"""Notion""": 18_2770,
"""Wikipedia""": 3_7583,
"""Books""": 6665,
"""Extract""": 7_6050,
"""Confessions""": 10_2701,
"""Conspiracy""": 7_5932,
"""Links""": 6_3674,
"""Narcissus""": 15_0425,
"""Relationship""": 5_4766,
"""Relationships""": 13_4796,
"""Reviews""": 4_1671,
"""News""": 4256,
"""Translation""": 2_6820,
"""multilingual""": 12_8406,
}
def UpperCamelCase ( snake_case__ : Any ):
'''simple docstring'''
__snake_case :List[Any] = set()
__snake_case :Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__snake_case :Tuple = char
__snake_case :Optional[Any] = set(snake_case__ )
return pairs
class snake_case__ (PreTrainedTokenizer):
    """
    CTRL-style BPE tokenizer with member names restored so the class is runnable.
    The base class is assumed to be `PreTrainedTokenizer` from `transformers`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the pair with the lowest rank; unseen pairs rank as infinity.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
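

# --- Illustrative sketch (not part of the original file) ---
# The `bpe` loop above repeatedly merges the adjacent symbol pair with the
# lowest merge rank. A minimal standalone check; `toy_ranks` and
# `_demo_bpe_pair_selection` are hypothetical names, not a real vocabulary.
def _demo_bpe_pair_selection() -> None:
    toy_ranks = {("h", "e"): 0, ("he", "llo</w>"): 1, ("l", "l"): 2}
    word = ("h", "e", "l", "l", "o</w>")
    pairs = get_pairs(word)
    best = min(pairs, key=lambda pair: toy_ranks.get(pair, float("inf")))
    assert best == ("h", "e")  # rank 0 merges first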
| 455 |
from collections.abc import Callable
class snake_case__:
    """
    Max-heap keyed by `key(item_value)`, with an item -> index map (`pos_map`)
    that makes update and delete by item O(log n).
    """

    def __init__(self, key: Callable | None = None) -> None:
        # Stores heap entries as [item, score] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        # Returns the index holding the largest score among i and its children.
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(i, left):
            valid_parent = left
        if right is not None and not self._cmp(valid_parent, right):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last entry into the freed slot, then re-heapify.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def UpperCamelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
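

# --- Illustrative usage sketch (not part of the original file) ---
# The class above is a max-heap keyed by `key(item_value)`; `pos_map` makes
# update/delete by item O(log n). `_demo_heap` is a hypothetical helper name.
def _demo_heap() -> None:
    h = snake_case__()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    assert h.get_top() == [7, 37]   # largest score on top
    h.update_item(6, 40)            # re-score an existing item
    assert h.extract_top() == [6, 40]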
| 455 | 1 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """Derive HuggingFace encoder/decoder configs from the original Donut config."""
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    """Map original Donut/Swin parameter names to the HuggingFace layout."""
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "encoder.layernorm.weight"
    if name == "encoder.norm.bias":
        name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    """Rename and reshape original state dict entries to the HuggingFace layout."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
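

# --- Illustrative sketch (not part of the original file) ---
# The fused qkv weight above is split into thirds along dim 0: rows [0, dim)
# become the query, [dim, 2*dim) the key, and the last `dim` rows the value.
# A toy check with dim == 2; `_demo_qkv_split` is a hypothetical helper name.
def _demo_qkv_split():
    dim = 2
    qkv = torch.arange(18.0).reshape(3 * dim, 3)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert torch.equal(torch.cat([q, k, v]), qkv)

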
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 715 | """simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase (TokenizerTesterMixin, unittest.TestCase):
    """BlenderbotSmall tokenizer tests, with names restored so the tests are runnable."""

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 296 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = False, False, False
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
def __call__( self : Optional[int] ) -> List[str]:
return self.pa_type
def lowercase_ ( self : str , __lowerCamelCase : Union[str, bytes, dict] ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"bytes": None, "path": value}
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
SCREAMING_SNAKE_CASE__ = BytesIO()
sf.write(__lowerCamelCase , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
if value.get('''bytes''' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
SCREAMING_SNAKE_CASE__ = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_2767
else:
SCREAMING_SNAKE_CASE__ = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_2767
SCREAMING_SNAKE_CASE__ = BytesIO(bytes() )
sf.write(__lowerCamelCase , __lowerCamelCase , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def lowercase_ ( self : Dict , __lowerCamelCase : dict , __lowerCamelCase : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
SCREAMING_SNAKE_CASE__ = xsplitext(__lowerCamelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
if file is None:
SCREAMING_SNAKE_CASE__ = token_per_repo_id or {}
SCREAMING_SNAKE_CASE__ = path.split('''::''' )[-1]
try:
SCREAMING_SNAKE_CASE__ = string_to_dict(__lowerCamelCase , config.HUB_DATASETS_URL )['''repo_id''']
SCREAMING_SNAKE_CASE__ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
SCREAMING_SNAKE_CASE__ = None
with xopen(__lowerCamelCase , '''rb''' , use_auth_token=__lowerCamelCase ) as f:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = sf.read(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = sf.read(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = array.T
if self.mono:
SCREAMING_SNAKE_CASE__ = librosa.to_mono(__lowerCamelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
SCREAMING_SNAKE_CASE__ = librosa.resample(__lowerCamelCase , orig_sr=__lowerCamelCase , target_sr=self.sampling_rate )
SCREAMING_SNAKE_CASE__ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowercase_ ( self : Optional[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def lowercase_ ( self : str , __lowerCamelCase : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
SCREAMING_SNAKE_CASE__ = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() )
SCREAMING_SNAKE_CASE__ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
SCREAMING_SNAKE_CASE__ = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
SCREAMING_SNAKE_CASE__ = pa.array([Audio().encode_example(__lowerCamelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
SCREAMING_SNAKE_CASE__ = storage.field('''bytes''' )
else:
SCREAMING_SNAKE_CASE__ = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
SCREAMING_SNAKE_CASE__ = storage.field('''path''' )
else:
SCREAMING_SNAKE_CASE__ = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
return array_cast(__lowerCamelCase , self.pa_type )
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : pa.StructArray ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(__lowerCamelCase : Dict ):
with xopen(__lowerCamelCase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
return bytes_
SCREAMING_SNAKE_CASE__ = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
SCREAMING_SNAKE_CASE__ = pa.array(
[os.path.basename(__lowerCamelCase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
SCREAMING_SNAKE_CASE__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(__lowerCamelCase , self.pa_type )
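

# --- Illustrative sketch (not part of the original file) ---
# The PCM branch above rescales 16-bit integer samples to float32 in [-1, 1]
# by dividing by 32767 before re-encoding as WAV. A standalone check;
# `_demo_pcm_rescale` is a hypothetical helper name.
def _demo_pcm_rescale() -> None:
    pcm = np.array([-32767, 0, 32767], dtype=np.int16)
    rescaled = pcm.astype(np.float32) / 32767
    assert rescaled.min() == -1.0 and rescaled.max() == 1.0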
| 493 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class UpperCAmelCase__ (BeitImageProcessor):
    """Deprecated alias kept for backward compatibility; use BeitImageProcessor."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 493 | 1 |
"""simple docstring"""
def __lowerCamelCase(lowerCAmelCase__):
    """Convert a hexadecimal string to an int whose decimal digits spell its binary form."""
    hex_num = lowerCAmelCase__.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
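

# --- Illustrative sketch (not part of the original file) ---
# The function parses the hex string with int(..., 16), then peels bits off
# with `% 2` / `>>= 1`. `_demo_hex_to_bin` is a hypothetical helper name.
def _demo_hex_to_bin() -> None:
    assert __lowerCamelCase("AC") == 10101100   # 0xAC == 0b10101100
    assert __lowerCamelCase("-0x10") == -10000  # sign is preserved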
| 554 |
"""simple docstring"""
from maths.prime_check import is_prime
def __lowerCamelCase(lowerCAmelCase__):
    """Return number + 2 if (number, number + 2) is a twin prime pair, else -1."""
    number = lowerCAmelCase__
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
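

# --- Illustrative sketch (not part of the original file) ---
# Twin primes differ by 2: the function returns number + 2 only when both
# `number` and `number + 2` are prime, otherwise -1. `_demo_twin_prime` is a
# hypothetical helper name.
def _demo_twin_prime() -> None:
    assert __lowerCamelCase(5) == 7    # (5, 7) is a twin prime pair
    assert __lowerCamelCase(7) == -1   # 9 == 3 * 3, so no twin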
| 554 | 1 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
snake_case__ : Dict = logging.get_logger(__name__)
class _a (VideoMAEImageProcessor):
    """Deprecated alias kept for backward compatibility; use VideoMAEImageProcessor."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 408 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self,**__lowerCamelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=__lowerCamelCase,padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname,do_normalize=__lowerCamelCase,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor,__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(__lowerCamelCase,return_tensors='''np''' )
A__ = processor(images=__lowerCamelCase,return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
@require_torch
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = [torch.ones((1, 3, 5, 5) )]
A__ = [[1764, 2646]]
A__ = [[683, 1024]]
A__ = processor.post_process_masks(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
A__ = processor.post_process_masks(
__lowerCamelCase,torch.tensor(__lowerCamelCase ),torch.tensor(__lowerCamelCase ) )
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
# should also work with np
A__ = [np.ones((1, 3, 5, 5) )]
A__ = processor.post_process_masks(__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ) )
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
A__ = [[1, 0], [0, 1]]
with self.assertRaises(__lowerCamelCase ):
A__ = processor.post_process_masks(__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self,**__lowerCamelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=__lowerCamelCase,padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname,do_normalize=__lowerCamelCase,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor,__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(__lowerCamelCase,return_tensors='''np''' )
A__ = processor(images=__lowerCamelCase,return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
@require_tf
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = [tf.ones((1, 3, 5, 5) )]
A__ = [[1764, 2646]]
A__ = [[683, 1024]]
A__ = processor.post_process_masks(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''tf''' )
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
A__ = processor.post_process_masks(
__lowerCamelCase,tf.convert_to_tensor(__lowerCamelCase ),tf.convert_to_tensor(__lowerCamelCase ),return_tensors='''tf''',)
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
# should also work with np
A__ = [np.ones((1, 3, 5, 5) )]
A__ = processor.post_process_masks(
__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ),return_tensors='''tf''' )
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
A__ = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A__ = processor.post_process_masks(
__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ),return_tensors='''tf''' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self,**__lowerCamelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = np.random.randint(0,2,size=(1, 3, 5, 5) ).astype(np.floataa )
A__ = [tf.convert_to_tensor(__lowerCamelCase )]
A__ = [torch.tensor(__lowerCamelCase )]
A__ = [[1764, 2646]]
A__ = [[683, 1024]]
A__ = processor.post_process_masks(
__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''tf''' )
A__ = processor.post_process_masks(
__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(__lowerCamelCase,return_tensors='''pt''' )['''pixel_values'''].numpy()
A__ = processor(images=__lowerCamelCase,return_tensors='''pt''' )['''pixel_values'''].numpy()
A__ = image_processor(__lowerCamelCase,return_tensors='''tf''' )['''pixel_values'''].numpy()
A__ = processor(images=__lowerCamelCase,return_tensors='''tf''' )['''pixel_values'''].numpy()
self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
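

# --- Illustrative sketch (not part of the original file) ---
# Shape-wise, `post_process_masks` upscales low-res mask logits back to each
# image's original (H, W). This concept-only check uses plain bilinear
# interpolation, not SAM's actual postprocessing (which also removes padding);
# `_demo_mask_upscale` is a hypothetical helper name.
def _demo_mask_upscale() -> None:
    low_res = torch.ones(1, 3, 5, 5)
    up = torch.nn.functional.interpolate(low_res, size=(1764, 2646), mode="bilinear", align_corners=False)
    assert up.shape == (1, 3, 1764, 2646)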
| 190 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowerCamelCase (ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
@property
def __lowerCamelCase ( self ):
__snake_case = 4
__snake_case = 3
__snake_case = (32, 32)
__snake_case = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
return {"sample": image}
@property
def __lowerCamelCase ( self ):
return (3, 32, 32)
@property
def __lowerCamelCase ( self ):
return (3, 32, 32)
def __lowerCamelCase ( self ):
__snake_case = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def __lowerCamelCase ( self ):
# enable deterministic behavior for gradient checkpointing
__snake_case , __snake_case = self.prepare_init_args_and_inputs_for_common()
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
assert not model.is_gradient_checkpointing and model.training
__snake_case = model(**SCREAMING_SNAKE_CASE_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__snake_case = torch.randn_like(SCREAMING_SNAKE_CASE_ )
__snake_case = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case = model_a(**SCREAMING_SNAKE_CASE_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__snake_case = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__snake_case = dict(model.named_parameters() )
__snake_case = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def __lowerCamelCase ( self ):
__snake_case , __snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
__snake_case = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __lowerCamelCase ( self ):
__snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__snake_case = model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
if torch_device == "mps":
__snake_case = torch.manual_seed(0 )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case = image.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).sample
__snake_case = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
__snake_case = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__snake_case = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-2 ) )
@slow
class _lowerCamelCase (unittest.TestCase ):
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy'''
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=(4, 3, 512, 512) , SCREAMING_SNAKE_CASE_=False ):
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ).to(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
return image
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE_=False ):
__snake_case = 'fp16' if fpaa else None
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = AutoencoderKL.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder='vae' , torch_dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , )
model.to(SCREAMING_SNAKE_CASE_ ).eval()
return model
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_=0 ):
if torch_device == "mps":
return torch.manual_seed(SCREAMING_SNAKE_CASE_ )
return torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ ):
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ ):
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.encode(SCREAMING_SNAKE_CASE_ ).latent_dist
__snake_case = dist.sample(generator=SCREAMING_SNAKE_CASE_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
__snake_case = 3E-3 if torch_device != 'mps' else 1E-2
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ )
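

# --- Illustrative sketch (not part of the original file) ---
# The tests above rely on the VAE's spatial downsampling: with two down blocks
# the factor is 2, so a (1, 3, 32, 32) image encodes to (1, 4, 16, 16) latents
# (randomly initialized weights suffice for a shape check). `_demo_vae_shapes`
# is a hypothetical helper name.
def _demo_vae_shapes() -> None:
    vae = AutoencoderKL(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=4,
    )
    image = torch.randn(1, 3, 32, 32)
    latents = vae.encode(image).latent_dist.sample()
    assert latents.shape == (1, 4, 16, 16)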
| 718 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCamelCase (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
def __lowerCamelCase ( self ):
super().setUp()
# fmt: off
__snake_case = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE_ ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ ):
__snake_case = 'tester'
__snake_case = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
__snake_case = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__snake_case = tokenizer.encode([special_token] , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertTrue(special_token not in decoded )
def __lowerCamelCase ( self ):
__snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case , __snake_case = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(len(SCREAMING_SNAKE_CASE_ ) , 0 )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(text_a.replace(' ' , '' ) , SCREAMING_SNAKE_CASE_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def __lowerCamelCase ( self ):
pass
| 345 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_UpperCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase_ ( DiffusionPipeline ):
"""simple docstring"""
def __init__( self : List[Any], UpperCamelCase__ : CLIPSegForImageSegmentation, UpperCamelCase__ : CLIPSegProcessor, UpperCamelCase__ : AutoencoderKL, UpperCamelCase__ : CLIPTextModel, UpperCamelCase__ : CLIPTokenizer, UpperCamelCase__ : UNetaDConditionModel, UpperCamelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], UpperCamelCase__ : StableDiffusionSafetyChecker, UpperCamelCase__ : CLIPImageProcessor, ) -> int:
super().__init__()
if hasattr(scheduler.config, 'steps_offset' ) and scheduler.config.steps_offset != 1:
_A = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1', '1.0.0', UpperCamelCase__, standard_warn=UpperCamelCase__ )
_A = dict(scheduler.config )
_A = 1
_A = FrozenDict(UpperCamelCase__ )
if hasattr(scheduler.config, 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
_A = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set', '1.0.0', UpperCamelCase__, standard_warn=UpperCamelCase__ )
_A = dict(scheduler.config )
_A = True
_A = FrozenDict(UpperCamelCase__ )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=UpperCamelCase__, segmentation_processor=UpperCamelCase__, vae=UpperCamelCase__, text_encoder=UpperCamelCase__, tokenizer=UpperCamelCase__, unet=UpperCamelCase__, scheduler=UpperCamelCase__, safety_checker=UpperCamelCase__, feature_extractor=UpperCamelCase__, )
def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : Optional[Union[str, int]] = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
self.enable_attention_slicing(UpperCamelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_A = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase__, UpperCamelCase__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self : int ) -> Dict:
if self.device != torch.device('meta' ) or not hasattr(self.unet, '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__, '_hf_hook' )
and hasattr(module._hf_hook, 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[int], UpperCamelCase__ : Union[str, List[str]], UpperCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCamelCase__ : str, UpperCamelCase__ : int = 5_12, UpperCamelCase__ : int = 5_12, UpperCamelCase__ : int = 50, UpperCamelCase__ : float = 7.5, UpperCamelCase__ : Optional[Union[str, List[str]]] = None, UpperCamelCase__ : Optional[int] = 1, UpperCamelCase__ : float = 0.0, UpperCamelCase__ : Optional[torch.Generator] = None, UpperCamelCase__ : Optional[torch.FloatTensor] = None, UpperCamelCase__ : Optional[str] = "pil", UpperCamelCase__ : bool = True, UpperCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCamelCase__ : int = 1, **UpperCamelCase__ : Dict, ) -> Tuple:
_A = self.segmentation_processor(
text=[text], images=[image], padding='max_length', return_tensors='pt' ).to(self.device )
_A = self.segmentation_model(**UpperCamelCase__ )
_A = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
_A = self.numpy_to_pil(UpperCamelCase__ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
_A = StableDiffusionInpaintPipeline(
vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, )
return inpainting_pipeline(
prompt=UpperCamelCase__, image=UpperCamelCase__, mask_image=UpperCamelCase__, height=UpperCamelCase__, width=UpperCamelCase__, num_inference_steps=UpperCamelCase__, guidance_scale=UpperCamelCase__, negative_prompt=UpperCamelCase__, num_images_per_prompt=UpperCamelCase__, eta=UpperCamelCase__, generator=UpperCamelCase__, latents=UpperCamelCase__, output_type=UpperCamelCase__, return_dict=UpperCamelCase__, callback=UpperCamelCase__, callback_steps=UpperCamelCase__, )
| 107 | '''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] ):
    # factory used as the argparse `func` default below
    return EnvironmentCommand()
class EnvironmentCommand ( BaseDiffusersCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand ( parser : ArgumentParser ) -> None:
        _A = parser.add_parser('env' )
        _A.set_defaults(func=_SCREAMING_SNAKE_CASE )
    def run ( self ) -> dict:
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
_A = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})',
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(_A ) )
        return _A
    @staticmethod
    def format_dict ( d : dict ) -> str:
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
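# Hedged usage sketch (illustration only): wiring the command into an argument
# parser the way a `diffusers-cli` entry point would; the top-level parser
# layout is an assumption.
if __name__ == "__main__":
    parser = ArgumentParser("diffusers-cli" )
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers" )
    EnvironmentCommand.register_subcommand(commands_parser )
    args = parser.parse_args(["env"] )
    args.func(args ).run()  # prints the environment report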
| 107 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class UpperCamelCase_ ( PretrainedConfig ):
    model_type = 'lilt'

    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=10_24 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
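# Hedged usage sketch (illustration only): instantiating the configuration with
# the defaults defined above and overriding a single field.
if __name__ == "__main__":
    configuration = UpperCamelCase_(channel_shrink_ratio=2 )
    print(configuration.hidden_size , configuration.channel_shrink_ratio )  # 768 2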
| 378 |
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_2_4,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_4_0,
'''dropout_rate''': 0.2,
'''dw_padding''': [1_6],
},
'''b2''': {
'''hidden_dim''': 1_4_0_8,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_6_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 1_6],
},
'''b3''': {
'''hidden_dim''': 1_5_3_6,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_0_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 1_8],
},
'''b4''': {
'''hidden_dim''': 1_7_9_2,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_8_0,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_0_4_8,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_5_6,
'''dropout_rate''': 0.4,
'''dw_padding''': [1_3, 2_7],
},
'''b6''': {
'''hidden_dim''': 2_3_0_4,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_2_8,
'''dropout_rate''': 0.5,
'''dw_padding''': [3_1],
},
'''b7''': {
'''hidden_dim''': 2_5_6_0,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_0_0,
'''dropout_rate''': 0.5,
'''dw_padding''': [1_8],
},
}
def get_efficientnet_config( model_name ):
    '''simple docstring'''
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["""hidden_dim"""]
    config.width_coefficient = CONFIG_MAP[model_name]["""width_coef"""]
    config.depth_coefficient = CONFIG_MAP[model_name]["""depth_coef"""]
    config.image_size = CONFIG_MAP[model_name]["""image_size"""]
    config.dropout_rate = CONFIG_MAP[model_name]["""dropout_rate"""]
    config.depthwise_padding = CONFIG_MAP[model_name]["""dw_padding"""]
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor( model_name ):
    '''simple docstring'''
    size = CONFIG_MAP[model_name]["""image_size"""]
    preprocessor = EfficientNetImageProcessor(
        size={"""height""": size, """width""": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=False , )
    return preprocessor
def rename_keys( original_param_names ):
    '''simple docstring'''
    block_names = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = """efficientnet.""" + item[1]
    key_mapping["""predictions/kernel:0"""] = """classifier.weight"""
    key_mapping["""predictions/bias:0"""] = """classifier.bias"""
    return key_mapping
def replace_params( hf_params , tf_params , key_mapping ):
    '''simple docstring'''
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    '''simple docstring'''
    original_model = model_classes[model_name](
        include_top=True , weights="""imagenet""" , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation="""softmax""" , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("""Converting parameters...""" )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors="""pt""" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["""image_size"""]
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1e-3 ), "The predicted logits are not the same."
    print("""Model outputs match!""" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(F'''Pushing converted {model_name} to the hub...''' )
        model_name = F'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
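# Hedged usage sketch (illustration only): a typical invocation of this
# conversion script; the script filename and output folder are assumptions.
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model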
| 378 | 1 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def lowerCamelCase__ ( _lowerCamelCase : str ) -> str:
    _lowerCamelCase = re.sub('<n>' , '' , _lowerCamelCase )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_lowerCamelCase ) )
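# Hedged usage sketch (illustration only): splitting a generated summary into
# one sentence per line (e.g. before computing rougeLsum); requires nltk.
if __name__ == "__main__":
    summary = "Pegasus is mythical. It is a flying horse."
    print(lowerCamelCase__(summary ) )
    # Pegasus is mythical.
    # It is a flying horse.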
| 549 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput ( BaseOutput ):
    predicted_image_embedding: torch.FloatTensor


class a ( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , num_attention_heads : int = 32 , attention_head_dim : int = 64 , num_layers : int = 20 , embedding_dim : int = 768 , num_embeddings : int = 77 , additional_embeddings : int = 4 , dropout : float = 0.0 , time_embedding_act_fn : str = "silu" , norm_in_type : Optional[str] = None , embedding_proj_norm_type : Optional[str] = None , encoder_hid_proj_type : Optional[str] = "linear" , added_emb_type : Optional[str] = "prd" , time_embed_dim : Optional[int] = None , embedding_proj_dim : Optional[int] = None , clip_embed_dim : Optional[int] = None , ) -> None:
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim , True , 0 )
        self.time_embedding = TimestepEmbedding(inner_dim , time_embed_dim , out_dim=inner_dim , act_fn=time_embedding_act_fn )
        self.proj_in = nn.Linear(embedding_dim , inner_dim )
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim )
        else:
            raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
        self.embedding_proj = nn.Linear(embedding_proj_dim , inner_dim )
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim , inner_dim )
        else:
            raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
        self.positional_embedding = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , inner_dim ) )
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1 , 1 , inner_dim ) )
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , activation_fn='gelu' , attention_bias=True , )
                for d in range(num_layers )
            ] )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim )
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
        self.norm_out = nn.LayerNorm(inner_dim )
        self.proj_to_clip_embeddings = nn.Linear(inner_dim , clip_embed_dim )
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
        causal_attention_mask.triu_(1 )
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask' , causal_attention_mask , persistent=False )
        self.clip_mean = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
        self.clip_std = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors ( self ) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name : str , module : torch.nn.Module , processors : Dict[str, AttentionProcessor] ):
            if hasattr(module , 'set_processor' ):
                processors[F'''{name}.processor'''] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F'''{name}.{sub_name}''' , child , processors )
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor ( self , processor : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> None:
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                F'''A dict of processors was passed, but the number of processors {len(processor )} does not match the'''
                F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )

        def fn_recursive_attn_processor(name : str , module : torch.nn.Module , processor ):
            if hasattr(module , 'set_processor' ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(F'''{name}.processor''' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F'''{name}.{sub_name}''' , child , processor )

        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor ( self ) -> None:
        self.set_attn_processor(AttnProcessor() )
    def forward ( self , hidden_states , timestep : Union[torch.Tensor, float, int] , proj_embedding : torch.FloatTensor , encoder_hidden_states : Optional[torch.FloatTensor] = None , attention_mask : Optional[torch.BoolTensor] = None , return_dict : bool = True , ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size , dtype=timesteps.dtype , device=timesteps.device )
        timesteps_projected = self.time_proj(timesteps )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype )
        time_embeddings = self.time_embedding(timesteps_projected )
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding )
        proj_embeddings = self.embedding_proj(proj_embedding )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
        hidden_states = self.proj_in(hidden_states )
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype )
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape ) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype ).expand(batch_size , -1 , -1 )
            additional_embeds.append(prd_embedding )
        hidden_states = torch.cat(
            additional_embeds , dim=1 , )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
            attention_mask = F.pad(attention_mask , (0, self.additional_embeddings) , value=0.0 )
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states )
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states , attention_mask=attention_mask )
        hidden_states = self.norm_out(hidden_states )
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding )
    def post_process_latents ( self , prior_latents ):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
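# Hedged usage sketch (illustration only): running the prior on random inputs
# with the constructor defaults above (77 text tokens + 4 extra embeddings).
if __name__ == "__main__":
    prior = a()
    image_embed = torch.randn(2 , 768 )       # hidden_states: noisy image embedding
    proj_embedding = torch.randn(2 , 768 )    # conditioning embedding
    text_embeds = torch.randn(2 , 77 , 768 )  # encoder_hidden_states from a text encoder
    out = prior(image_embed , timestep=10 , proj_embedding=proj_embedding , encoder_hidden_states=text_embeds )
    print(out.predicted_image_embedding.shape )  # torch.Size([2, 768])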
| 549 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'vit_mae'

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2_048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
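# Hedged usage sketch (illustration only): building a config and overriding the
# masking ratio used during MAE pre-training.
if __name__ == "__main__":
    configuration = UpperCAmelCase__(mask_ratio=0.6 )
    print(configuration.mask_ratio , configuration.decoder_hidden_size )  # 0.6 512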
| 286 | import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config( checkpoint_url ):
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_26
        config.window_size = 7
        config.img_range = 2_55.0
        config.upsampler = ""
    return config
def rename_key( name , config ):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" )
    if "layers" in name:
        name = name.replace("layers" , "encoder.stages" )
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks" , "layers" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "q_bias" in name:
        name = name.replace("q_bias" , "query.bias" )
    if "k_bias" in name:
        name = name.replace("k_bias" , "key.bias" )
    if "v_bias" in name:
        name = name.replace("v_bias" , "value.bias" )
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "patch_embed.projection" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first" , "first_convolution" )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last" , "final_convolution" )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0" , "conv_before_upsample" )
            if "upsample.0" in name:
                name = name.replace("upsample.0" , "upsample.convolution_0" )
            if "upsample.2" in name:
                name = name.replace("upsample.2" , "upsample.convolution_1" )
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight" , "upsample.conv.weight" )
            name = name.replace("upsample.0.bias" , "upsample.conv.bias" )
        else:
            pass
    else:
        name = "swin2sr." + name
    return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
def convert_swin2sr_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    config = get_config(checkpoint_url )
    model = Swin2SRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )
    state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'''Unexpected key {key} in state_dict''' )
# verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 1_26 if "Jpeg" in checkpoint_url else 2_56
    transforms = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
# assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_12, 5_12] )
        expected_slice = torch.tensor(
            [[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 10_24, 10_24] )
        expected_slice = torch.tensor(
            [[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 10_24, 10_24] )
        expected_slice = torch.tensor(
            [[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_12, 5_12] )
        expected_slice = torch.tensor(
            [[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 10_24, 10_24] )
        expected_slice = torch.tensor(
            [[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3 )
print("Looks ok!" )
    url_to_name = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
    model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub(F'''caidas/{model_name}''' )
processor.push_to_hub(F'''caidas/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
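# Hedged usage sketch (illustration only): a typical invocation of this
# conversion script; the script filename and output folder are assumptions.
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path swin2sr_out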
| 286 | 1 |
from typing import Any
def __lowerCAmelCase ( input_list ):
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
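# Hedged usage sketch (illustration only): the function returns every value
# that occurs most often, in sorted order.
#   __lowerCAmelCase([2, 2, 3, 3, 4])  ->  [2, 3]
#   __lowerCAmelCase([])               ->  []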
if __name__ == "__main__":
import doctest
doctest.testmod()
| 226 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name ):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f""".{module_name}""" , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the image processor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
class UpperCAmelCase :
def __init__( self : Any ):
"""simple docstring"""
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        config = kwargs.pop('''config''' , None )
        trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
        kwargs['''_from_auto'''] = True
        config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get('''image_processor_type''' , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
            image_processor_auto_map = config_dict['''auto_map''']['''AutoImageProcessor''']
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('''feature_extractor_type''' , None )
            if feature_extractor_class is not None:
                logger.warning(
                    '''Could not find image processor class in the image processor config or the model config. Loading'''
                    ''' based on pattern matching with the model\'s feature extractor configuration.''' )
                image_processor_class = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
            if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
                feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
                image_processor_auto_map = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
                logger.warning(
                    '''Could not find image processor auto map in the image processor config or the model config.'''
                    ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config , '''image_processor_type''' , None )
            if hasattr(config , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['''AutoImageProcessor''']
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            kwargs.pop('''code_revision''' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
            f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
    def register ( config_class , image_processor_class ) -> None:
        """simple docstring"""
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
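# Hedged usage sketch (illustration only): resolving the right processor class
# from a checkpoint id; the checkpoint name is an assumption.
if __name__ == "__main__":
    image_processor = UpperCAmelCase.from_pretrained("google/vit-base-patch16-224" )
    print(type(image_processor ).__name__ )  # ViTImageProcessor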
| 103 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class _lowerCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    def tearDown ( self ) -> None:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_from_save_pretrained ( self ) -> None:
        """simple docstring"""
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt="""first prompt""" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe.dual_guided(
            prompt="""first prompt""" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image ( self ) -> None:
        """simple docstring"""
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''cyberpunk 2077'''
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        prompt = '''A painting of a squirrel eating a burger '''
        generator = torch.manual_seed(0 )
        image = pipe.text_to_image(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        image = pipe.image_variation(init_image , generator=generator , output_type="""numpy""" ).images
        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 718 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    env_level_str = os.getenv("""DATASETS_VERBOSITY""" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(""".""" )[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name() )


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )


def get_logger( name : Optional[str] = None ) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity( verbosity : int ) -> None:
    _get_library_root_logger().setLevel(verbosity )


def set_verbosity_info() -> None:
    return set_verbosity(INFO )


def set_verbosity_warning() -> None:
    return set_verbosity(WARNING )


def set_verbosity_debug() -> None:
    return set_verbosity(DEBUG )


def set_verbosity_error() -> None:
    return set_verbosity(ERROR )


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm :
    def __init__( self , *args , **kwargs ) -> None: # pylint: disable=unused-argument
        """simple docstring"""
        self._iterator = args[0] if args else None
def __iter__( self : Dict ) -> Dict:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self : str , UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Tuple ) -> Tuple:
"""simple docstring"""
return self
def __exit__( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
return
_tqdm_active = True


class _tqdm_cls :
    def __call__( self , *args , disable : bool = False , **kwargs ):
        """simple docstring"""
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock ( self , *args , **kwargs ) -> None:
        """simple docstring"""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock ( self ):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active )


def enable_progress_bar() -> None:
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar() -> None:
    global _tqdm_active
    _tqdm_active = False
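# Hedged usage sketch (illustration only): typical client use of this module.
if __name__ == "__main__":
    set_verbosity_info()
    logger = get_logger(__name__ )
    logger.info("effective verbosity: %s" , get_verbosity() )
    disable_progress_bar()  # tqdm(...) wrappers above now return silent no-op bars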
| 507 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node :
    def __init__(self , data : Any ) -> None:
        """simple docstring"""
        self.data = data
        self.next = None
class CircularLinkedList :
    def __init__(self ) -> None:
        """simple docstring"""
        self.head = None
        self.tail = None
    def __iter__(self ) -> Iterator[Any]:
        """simple docstring"""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
def __len__(self : Tuple ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
    def __repr__(self ) -> str:
        """simple docstring"""
        return "->".join(str(item ) for item in iter(self ) )
    def insert_tail (self , data : Any ) -> None:
        """simple docstring"""
        self.insert_nth(len(self ) , data )

    def insert_head (self , data : Any ) -> None:
        """simple docstring"""
        self.insert_nth(0 , data )
    def insert_nth (self , index : int , data : Any ) -> None:
        """simple docstring"""
        if index < 0 or index > len(self ):
            raise IndexError("list index out of range." )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1:  # insert at tail
                self.tail = new_node
    def delete_front (self ) -> Any:
        """simple docstring"""
        return self.delete_nth(0 )

    def delete_tail (self ) -> Any:
        """simple docstring"""
        return self.delete_nth(len(self ) - 1 )
def lowercase_ (self : str , __UpperCAmelCase : int = 0 ) -> Any:
"""simple docstring"""
if not 0 <= index < len(self ):
raise IndexError("list index out of range." )
UpperCAmelCase__ = self.head
if self.head == self.tail: # just one node
UpperCAmelCase__ = UpperCAmelCase__ = None
elif index == 0: # delete head node
UpperCAmelCase__ = self.tail.next.next
UpperCAmelCase__ = self.head.next
else:
UpperCAmelCase__ = self.head
for _ in range(index - 1 ):
UpperCAmelCase__ = temp.next
UpperCAmelCase__ = temp.next
UpperCAmelCase__ = temp.next.next
if index == len(self ) - 1: # delete at tail
UpperCAmelCase__ = temp
return delete_node.data
def lowercase_ (self : Union[str, Any] ) -> bool:
"""simple docstring"""
return len(self ) == 0
def test_circular_linked_list() -> None:
    '''simple docstring'''
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
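    # A short usage sketch of the CircularLinkedList class defined above:
    cll = CircularLinkedList()
    for value in (1, 2, 3):
        cll.insert_tail(value)
    cll.insert_head(0)
    assert str(cll) == "0->1->2->3"
    assert cll.delete_front() == 0
    assert len(cll) == 3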
| 486 | import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass
def gen(shards):
    '''simple docstring'''
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    '''simple docstring'''
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
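    # Self-contained sketch of the size bookkeeping checked above: split_dataset_by_node
    # gives one extra example to each of the first full_size % world_size ranks.
    demo_full_size = 4 * 3  # NUM_SHARDS * NUM_ITEMS_PER_SHARD
    demo_sizes = [demo_full_size // 5 + int(r < demo_full_size % 5) for r in range(5)]
    assert demo_sizes == [3, 3, 2, 2, 2] and sum(demo_sizes) == demo_full_size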
| 486 | 1 |
'''simple docstring'''
import re
def split_input(str_: str):
    '''simple docstring'''
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]', str_)]


def to_simple_case(str_: str):
    '''simple docstring'''
    string_split = split_input(str_)
    return "".join(
        [''.join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str):
    '''simple docstring'''
    try:
        string_split = split_input(text)
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str):
    '''simple docstring'''
    return to_simple_case(text)


def to_camel_case(text: str):
    '''simple docstring'''
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool):
    '''simple docstring'''
    return to_complex_case(text, upper, '_')


def to_kebab_case(text: str, upper: bool):
    '''simple docstring'''
    return to_complex_case(text, upper, '-')
if __name__ == "__main__":
__import__('doctest').testmod()
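    # A few usage examples for the converters above:
    assert to_pascal_case("hello world") == "HelloWorld"
    assert to_camel_case("hello world") == "helloWorld"
    assert to_snake_case("hello world", upper=False) == "hello_world"
    assert to_kebab_case("hello world", upper=True) == "HELLO-WORLD"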
| 331 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
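# A hedged usage sketch of the special-token layout built above (MVP reuses the
# BART/RoBERTa scheme: <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair).
# Running it downloads the RUCAIBox/mvp tokenizer files.
if __name__ == "__main__":
    tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    ids = tok.build_inputs_with_special_tokens([10, 11])
    assert ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id
    assert tok.create_token_type_ids_from_sequences([10, 11]) == [0, 0, 0, 0]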
| 331 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs):
        '''simple docstring'''
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens" )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        '''simple docstring'''
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f'Copy vocab file to {out_vocab_file}')
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        '''simple docstring'''
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
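# A hedged usage sketch of the T5 layout above: no BOS token, `</s>` appended, and
# all-zero token type ids. Running it downloads the t5-small tokenizer files.
if __name__ == "__main__":
    tok = T5TokenizerFast.from_pretrained("t5-small")
    ids = tok.build_inputs_with_special_tokens([10, 11])
    assert ids == [10, 11, tok.eos_token_id]
    assert tok.create_token_type_ids_from_sequences([10, 11]) == [0, 0, 0]
    assert len(tok.get_sentinel_tokens()) == 100  # <extra_id_0> ... <extra_id_99>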
| 378 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 3_0000)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
    @slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
lowerCAmelCase__ : Any = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 378 | 1 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    '''simple docstring'''
    stem = fname.split(os.path.sep)[-1]
    return re.search(r'^(.*)_\d+\.jpg$', stem).groups()[0]
class PetsDataset(Dataset):
    """simple docstring"""

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert('RGB')
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    '''simple docstring'''
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with='all', project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    image_size = config['image_size']
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, 'isdigit'):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.')
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split('.')[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith('.jpg')]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model('resnet50d', pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg['mean'])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg['std'])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}')
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace('epoch_', '')) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace('step_', ''))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch['label'])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f'step_{overall_step}'
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['label']))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}: {100 * eval_metric:.2f}')
        if args.with_tracking:
            accelerator.log(
                {
                    'accuracy': 100 * eval_metric,
                    'train_loss': total_loss.item() / len(train_dataloader),
                    'epoch': epoch,
                }, step=overall_step, )
        if checkpointing_steps == "epoch":
            output_dir = f'epoch_{epoch}'
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument('--data_dir', required=True, help='The data folder on disk.')
    parser.add_argument('--fp16', action='store_true', help='If passed, will use FP16 training.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    parser.add_argument(
        '--checkpointing_steps', type=str, default=None, help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.', )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.', )
    parser.add_argument(
        '--with_tracking', action='store_true', help='Whether to load in all available experiment trackers from the environment and use them for logging.', )
    parser.add_argument(
        '--project_dir', type=str, default='logs', help='Location on where to store experiment tracking logs` and relevent project information', )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config, args)
if __name__ == "__main__":
main()
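    # Hedged sketch: the same entry point can be driven programmatically instead of via
    # `accelerate launch cv_example.py --data_dir ...`. The data_dir below is a placeholder.
    demo_args = argparse.Namespace(
        data_dir="path/to/images", mixed_precision=None, cpu=True, checkpointing_steps=None,
        output_dir=".", resume_from_checkpoint=None, with_tracking=False, project_dir="logs",
    )
    # training_function({"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}, demo_args)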
| 701 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'esm'
    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output['esmfold_config'] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """simple docstring"""
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output['trunk'] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """simple docstring"""
    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f' {self.sequence_state_dim} and {self.sequence_head_width}.')
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.')
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
    def to_dict(self):
        output = asdict(self)
        output['structure_module'] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """simple docstring"""
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    '''simple docstring'''
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
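# A small sanity sketch of the config dataclasses above, assuming the defaults shown
# (sequence track: 1024 = 32 heads * width 32).
if __name__ == "__main__":
    trunk = TrunkConfig()
    assert trunk.sequence_state_dim // trunk.sequence_head_width == 32
    cfg = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config=EsmFoldConfig())
    assert cfg.vocab_list == get_default_vocab_list()
    assert "esmfold_config" in cfg.to_dict()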
| 409 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 331 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
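    # The mapping computed in stretch() is histogram equalization: level k is remapped to
    # s_k = round((L - 1) * sum_{j<=k} p(r_j)). A tiny worked example with 4 levels:
    counts = [4, 2, 1, 1]  # toy histogram over an 8-pixel, 4-level image
    total, sk, mapping = sum(counts), 0.0, []
    for c in counts:
        sk += c / total
        mapping.append(round((4 - 1) * sk))
    assert mapping == [2, 2, 3, 3]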
| 298 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """simple docstring"""
def __init__( self : Optional[Any], _snake_case : List[Any], _snake_case : Optional[Any]=1_3, _snake_case : Tuple=7, _snake_case : Any=True, _snake_case : Any=True, _snake_case : List[Any]=True, _snake_case : Union[str, Any]=True, _snake_case : List[str]=9_9, _snake_case : List[Any]=3_2, _snake_case : Union[str, Any]=5, _snake_case : Dict=4, _snake_case : Dict=3_7, _snake_case : Dict="gelu", _snake_case : Union[str, Any]=0.1, _snake_case : Dict=0.1, _snake_case : Tuple=5_1_2, _snake_case : List[str]=1_6, _snake_case : Union[str, Any]=2, _snake_case : Dict=0.0_2, _snake_case : Any=4, ) ->int:
snake_case__ : List[Any] = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : str = is_training
snake_case__ : Dict = use_attention_mask
snake_case__ : Optional[Any] = use_token_type_ids
snake_case__ : Any = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Tuple = hidden_size
snake_case__ : int = num_hidden_layers
snake_case__ : int = num_attention_heads
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : List[str] = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Optional[int] = max_position_embeddings
snake_case__ : Union[str, Any] = type_vocab_size
snake_case__ : List[Any] = type_sequence_label_size
snake_case__ : Optional[int] = initializer_range
snake_case__ : Dict = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 243 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
a_ :Dict = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    """simple docstring"""
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
            if hparams.sortish_sampler:
                raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, 'summarization')
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / 'metrics.json'
        self.hparams_save_path = Path(self.output_dir) / 'hparams.pkl'
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            'train': self.hparams.n_train,
            'val': self.hparams.n_val,
            'test': self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            'train': self.hparams.max_target_length,
            'val': self.hparams.val_max_target_length,
            'test': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
        assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()['repo_sha']
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """simple docstring"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if 'mask' not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / 'text_batch.json')
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / 'tok_batch.json')
        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]) -> List[str]:
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict):
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch['input_ids'], batch['attention_mask']
        tgt_ids = batch['labels']
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch['decoder_input_ids'] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs['logits']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id)
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs['tpb'] = batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum()
        logs['bs'] = batch['input_ids'].shape[0]
        logs['src_pad_tok'] = batch['input_ids'].eq(self.pad).sum()
        logs['src_pad_frac'] = batch['input_ids'].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]  # best checkpoint
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
a_ :Any = argparse.ArgumentParser()
a_ :Dict = pl.Trainer.add_argparse_args(parser)
a_ :Optional[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
a_ :Dict = parser.parse_args()
main(args)
| 243 | 1 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Restore the heap upwards after the value at `index` decreased to `val`."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
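# Example (sketch): the MST of a weighted triangle with edges (0, 1, w=1),
# (1, 2, w=2) and (0, 2, w=3) consists of the two cheapest edges.
_example_graph = defaultdict(list)
for _u, _v, _w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    _example_graph[_u].append([_v, _w])
    _example_graph[_v].append([_u, _w])
assert prisms_algorithm(_example_graph) == [(0, 1), (1, 2)]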
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
a__ : List[str] = int(input('Enter number of edges: ').strip())
a__ : List[str] = defaultdict(list)
for _ in range(edges_number):
a__ : List[Any] = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 188 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        coins_distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        coins_distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_distrib_moves, coins_distrib_excess)

    return get_distrib(root)[0]
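# Quick check (sketch): for the tree [3, 0, 0], three coins on the root and none
# on the two leaves, two moves suffice: one coin to each leaf.
assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2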
if __name__ == "__main__":
import doctest
doctest.testmod()
| 121 | 0 |
import argparse
from collections import defaultdict
import yaml
SCREAMING_SNAKE_CASE : Any = """docs/source/en/_toctree.yml"""
def __A ( _A ):
"""simple docstring"""
__a = defaultdict(_A )
for doc in model_doc:
counts[doc["local"]] += 1
__a = [key for key, value in counts.items() if value > 1]
__a = []
for duplicate_key in duplicates:
__a = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(_A ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
return sorted(_A , key=lambda _A : s["title"].lower() )
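# Quick check (sketch): duplicate "local" keys collapse to one entry and the
# result comes back sorted by title.
assert clean_model_doc_toc(
    [
        {"local": "model_doc/bert", "title": "BERT"},
        {"local": "model_doc/albert", "title": "ALBERT"},
        {"local": "model_doc/bert", "title": "BERT"},
    ]
) == [
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]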
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Clean each modality sub-section in turn
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
SCREAMING_SNAKE_CASE : int = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 714 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE : List[Any] = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class A_ :
_SCREAMING_SNAKE_CASE = PegasusConfig
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = """gelu"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : List[str]=5 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Tuple=20 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Dict=1 , __SCREAMING_SNAKE_CASE : List[Any]=0 , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = eos_token_id
__a = pad_token_id
__a = bos_token_id
def _UpperCAmelCase ( self : Optional[int] ):
__a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__a = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__a = np.concatenate([input_ids, eos_tensor] , axis=1 )
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__a = prepare_pegasus_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
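# Illustration (sketch): what the masks above produce on a toy batch, assuming
# pad_token_id == 0. The decoder mask always keeps position 0 visible:
#     input_ids         = [[5, 6, 0]]  ->  attention_mask         = [[1, 1, 0]]
#     decoder_input_ids = [[7, 0, 0]]  ->  decoder_attention_mask = [[1, 0, 0]]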
@require_flax
class A_ ( a_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _UpperCAmelCase ( self : int ):
__a = FlaxPegasusModelTester(self )
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : Optional[int] ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : str ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Dict ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__a = model_class(__SCREAMING_SNAKE_CASE )
@jax.jit
def encode_jitted(__SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Optional[Any] ):
return model.encode(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
with self.subTest("JIT Enabled" ):
__a = encode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__a = encode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCAmelCase ( self : List[str] ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a = model_class(__SCREAMING_SNAKE_CASE )
__a = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
__a = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple ):
return model.decode(
decoder_input_ids=__SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , encoder_outputs=__SCREAMING_SNAKE_CASE , )
with self.subTest("JIT Enabled" ):
__a = decode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__a = decode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _UpperCAmelCase ( self : Tuple ):
for model_class_name in self.all_model_classes:
__a = model_class_name.from_pretrained("google/pegasus-large" , from_pt=__SCREAMING_SNAKE_CASE )
__a = np.ones((1, 1) )
__a = model(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self : str ):
__a = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
__a = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
__a = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__a = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
__a = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="np" , truncation=__SCREAMING_SNAKE_CASE , max_length=5_12 , padding=__SCREAMING_SNAKE_CASE )
__a = model.generate(**__SCREAMING_SNAKE_CASE , num_beams=2 ).sequences
__a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
assert tgt_text == decoded
| 525 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( _UpperCAmelCase ):
UpperCamelCase_ :List[Any] = (DDIMParallelScheduler,)
UpperCamelCase_ :List[str] = (('eta', 0.0), ('num_inference_steps', 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
def __snake_case ( self : List[str] ):
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase__ )
def __snake_case ( self : Union[str, Any] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase__ )
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
lowerCAmelCase__ = scheduler_class(**lowercase__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def __snake_case ( self : str ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase__ , beta_end=lowercase__ )
def __snake_case ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase__ )
def __snake_case ( self : Optional[int] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase__ )
def __snake_case ( self : List[Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase__ )
def __snake_case ( self : Optional[int] ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowercase__ )
def __snake_case ( self : Union[str, Any] ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowercase__ )
def __snake_case ( self : List[str] ):
self.check_over_configs(thresholding=lowercase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase__ , prediction_type=lowercase__ , sample_max_value=lowercase__ , )
def __snake_case ( self : List[Any] ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowercase__ )
def __snake_case ( self : Optional[int] ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=lowercase__ , num_inference_steps=lowercase__ )
def __snake_case ( self : Tuple ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowercase__ , eta=lowercase__ )
def __snake_case ( self : Dict ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**lowercase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
def __snake_case ( self : int ):
lowerCAmelCase__ = self.full_loop()
lowerCAmelCase__ = torch.sum(torch.abs(lowercase__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def __snake_case ( self : Optional[int] ):
lowerCAmelCase__ = self.full_loop(prediction_type='''v_prediction''' )
lowerCAmelCase__ = torch.sum(torch.abs(lowercase__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def __snake_case ( self : str ):
lowerCAmelCase__ = self.full_loop(set_alpha_to_one=lowercase__ , beta_start=0.01 )
lowerCAmelCase__ = torch.sum(torch.abs(lowercase__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def __snake_case ( self : str ):
lowerCAmelCase__ = self.full_loop(set_alpha_to_one=lowercase__ , beta_start=0.01 )
lowerCAmelCase__ = torch.sum(torch.abs(lowercase__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
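# Illustration (sketch): the timestep grid used by the batched test above. Three
# consecutive timesteps are repeated across the per-sample batch, then flattened
# so every (sample, timestep) pair is stepped in one call:
#     torch.arange(10)[0:3, None].repeat(1, 2)  ->  [[0, 0], [1, 1], [2, 2]]
#     .flatten(0, 1)                            ->  [0, 0, 1, 1, 2, 2]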
| 668 |
def is_pentagonal(n: int) -> bool:
    """Return True if n is a pentagonal number."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Return the smallest difference of two pentagonal numbers whose sum and difference are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
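# Quick check (sketch): P_4 = 22 is pentagonal, 23 is not.
assert is_pentagonal(22) and not is_pentagonal(23)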
if __name__ == "__main__":
print(F'''{solution() = }''')
| 421 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
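# Note (sketch): the _LazyModule registered above defers the torch-heavy imports
# listed in _import_structure until an attribute is first accessed, e.g.
#     from transformers.models.luke import LukeModel  # only now is modeling_luke imported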
| 229 |
"""simple docstring"""
deps = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
| 229 | 1 |
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # first difference of consecutive cubes: 2**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # (n + 1)**3 - n**3 grows by 6(n + 1) each step
    return primes_count
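# Quick check (sketch): the differences of consecutive cubes below 100 are
# 7, 19, 37, 61 and 91; all but 91 (= 7 * 13) are prime, so the count is 4.
assert solution(100) == 4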
if __name__ == "__main__":
print(F"""{solution() = }""")
| 33 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=UpperCamelCase__ , )
assert hasattr(self , """env""" )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
lowercase_ = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowercase_ = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowercase_ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowercase_ = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version="""py36""" , )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
        # create estimator
        estimator = self.create_estimator(UpperCamelCase__)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , UpperCamelCase__ )
| 412 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =MBartTokenizer
snake_case__ : Optional[int] =MBartTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : str =True
def a__ ( self: Tuple )-> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: List[str] )-> Optional[int]:
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
lowerCamelCase : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a__ ( self: Optional[Any] )-> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase : str = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
lowerCamelCase : str = self.tokenizer_class.from_pretrained(__a , **__a )
lowerCamelCase : Optional[int] = tempfile.mkdtemp()
lowerCamelCase : Tuple = tokenizer_r.save_pretrained(__a )
lowerCamelCase : Optional[Any] = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowerCamelCase : Optional[int] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
lowerCamelCase : Optional[int] = tokenizer_r.from_pretrained(__a )
lowerCamelCase : Optional[int] = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=True
lowerCamelCase : int = tempfile.mkdtemp()
lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(__a , legacy_format=__a )
lowerCamelCase : Any = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
lowerCamelCase : int = tokenizer_r.from_pretrained(__a )
lowerCamelCase : Optional[int] = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=False
lowerCamelCase : Dict = tempfile.mkdtemp()
lowerCamelCase : Any = tokenizer_r.save_pretrained(__a , legacy_format=__a )
lowerCamelCase : List[str] = tokenizer_p.save_pretrained(__a )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase : Tuple = tokenizer_r.from_pretrained(__a )
lowerCamelCase : Dict = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
"""simple docstring"""
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def a__ ( self: Tuple )-> List[str]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
def a__ ( self: Dict )-> Tuple:
lowerCamelCase : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def a__ ( self: Optional[int] )-> Optional[Any]:
self.assertIn(__a , self.tokenizer.all_special_ids )
lowerCamelCase : Union[str, Any] = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
lowerCamelCase : Tuple = self.tokenizer.decode(__a , skip_special_tokens=__a )
lowerCamelCase : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def a__ ( self: List[Any] )-> Any:
lowerCamelCase : str = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __a )
lowerCamelCase : Dict = 10
lowerCamelCase : Union[str, Any] = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __a )
self.assertEqual(len(__a ) , __a )
def a__ ( self: str )-> Any:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_026, 250_001] )
def a__ ( self: Tuple )-> List[str]:
lowerCamelCase : str = tempfile.mkdtemp()
lowerCamelCase : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
lowerCamelCase : Optional[int] = MBartTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def a__ ( self: Dict )-> int:
lowerCamelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="""pt""" )
lowerCamelCase : Any = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def a__ ( self: Dict )-> Dict:
lowerCamelCase : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
lowerCamelCase : Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase : Any = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
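        # Note on the two asserts above: MBart formats the *source* side as
        # "X [eos] [src_lang_code]", which is why suffix_tokens is
        # [eos_token_id, EN_CODE] while prefix_tokens stays empty.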
    def a__ ( self: str )-> Dict:
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="""pt""" )
        labels = targets["""input_ids"""]
        batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : List[str] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__a ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3_034, 2, 250_004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} , )
| 42 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Optional[Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
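# Minimal usage sketch for the lazy-module pattern above (assuming the usual
# `transformers` package layout): nothing listed in `_import_structure` is
# actually imported until the attribute is first accessed on the package.
#
#   from transformers.models.encodec import EncodecConfig  # triggers the real import
#   config = EncodecConfig()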
| 42 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp( self) -> Dict:
super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
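        # The tiny vocab/merges written above give the BPE tokenizer just enough
        # coverage for the "lower newer"-style fixtures, so the tests run fully
        # offline without downloading a checkpoint.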
    def get_tokenizer( self , **kwargs) -> List[Any]:
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_rust_tokenizer( self , **kwargs) -> Tuple:
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer) -> Any:
return "lower newer", "lower newer"
@cached_property
    def default_tokenizer( self) -> int:
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''')
@cached_property
    def default_tokenizer_fast( self) -> List[Any]:
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''')
@require_torch
def UpperCamelCase__ ( self) -> Union[str, Any]:
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens) , padding=True , return_tensors='''pt''')
            self.assertIsInstance(batch , BatchEncoding)
            self.assertEqual((2, 9) , batch.input_ids.shape)
            self.assertEqual((2, 9) , batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result)
@require_torch
def UpperCamelCase__ ( self) -> Any:
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors='''pt''')
            self.assertIn('''input_ids''' , batch)
            self.assertIn('''attention_mask''' , batch)
            self.assertNotIn('''labels''' , batch)
            self.assertNotIn('''decoder_attention_mask''' , batch)
@require_torch
def UpperCamelCase__ ( self) -> Any:
        tgt_text = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding='''max_length''' , return_tensors='''pt''')
self.assertEqual(32 , targets['''input_ids'''].shape[1])
@require_torch
def UpperCamelCase__ ( self) -> str:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['''I am a small frog''' * 1_024, '''I am a small frog'''] , padding=True , truncation=True , return_tensors='''pt''')
            self.assertIsInstance(batch , BatchEncoding)
self.assertEqual(batch.input_ids.shape , (2, 5_122))
@require_torch
def UpperCamelCase__ ( self) -> List[Any]:
        src_text = ['''A long paragraph for summarization.''']
        tgt_text = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors='''pt''')
            targets = tokenizer(text_target=tgt_text , return_tensors='''pt''')
            input_ids = inputs['''input_ids''']
            labels = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
@require_torch
def UpperCamelCase__ ( self) -> str:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['''Summary of the text.''', '''Another summary.''']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text , padding=False)
            encoded_output['''global_attention_mask'''] = [[0] * len(x) for x in encoded_output['''input_ids''']]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs['''global_attention_mask'''] , expected_global_attention_mask)
def UpperCamelCase__ ( self) -> List[str]:
pass
def UpperCamelCase__ ( self) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
self.assertEqual(sum(tokens_r['''token_type_ids''']) , sum(tokens_p['''token_type_ids''']))
self.assertEqual(
sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) , sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
| 167 | import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__lowercase = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
__lowercase = {
'''camembert-base''': 512,
}
__lowercase = '''▁'''
class lowerCamelCase_ ( PreTrainedTokenizer ):
'''simple docstring'''
a__ : Tuple = VOCAB_FILES_NAMES
a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def UpperCamelCase__ ( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
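        # For reference, the layout built above is the standard CamemBERT one:
        # single sequence:   <s> X </s>
        # pair of sequences: <s> A </s></s> B </s>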
    def UpperCamelCase__ ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def UpperCamelCase__ ( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def UpperCamelCase__ ( self) -> int:
return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
    def UpperCamelCase__ ( self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
def UpperCamelCase__ ( self , __lowercase) -> List[str]:
        return self.sp_model.encode(__lowercase , out_type=str)
def UpperCamelCase__ ( self , __lowercase) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(__lowercase) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(__lowercase)
    def UpperCamelCase__ ( self , index) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def UpperCamelCase__ ( self , tokens) -> Optional[Any]:
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__( self) -> Dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d) -> Optional[Any]:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def UpperCamelCase__ ( self , save_directory , filename_prefix = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
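    # Hedged round-trip sketch (the /tmp path is a placeholder; upstream this
    # class is exposed as CamembertTokenizer):
    #   tok = CamembertTokenizer.from_pretrained("camembert-base")
    #   tok.save_pretrained("/tmp/camembert")   # writes sentencepiece.bpe.model
    #   reloaded = CamembertTokenizer.from_pretrained("/tmp/camembert")
    #   assert reloaded.get_vocab() == tok.get_vocab()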
| 167 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
__lowercase = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 708 | '''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma( num: float ) -> float:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("""math domain error""" )
    return quad(integrand , 0 , inf , args=(num) )[0]
def integrand( x: float , z: float ) -> float:
    '''simple docstring'''
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
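# Quick sanity check for the functions above: gamma(5) integrates
# x**4 * exp(-x) over [0, inf), which equals 4! = 24 up to quadrature error.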
| 605 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp( self : Dict ):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_ids )
        reconstructed_text = tokenizer.decode(input_ids )
        self.assertEqual(reconstructed_text , normalized_text )
| 48 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
def simple_accuracy( preds : Union[str, Any] , labels : str ) -> Union[str, Any]:
    """simple docstring"""
    return (preds == labels).mean()
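# Worked example: simple_accuracy(np.array([0, 1, 1]), np.array([0, 0, 1]))
# compares elementwise to [True, False, True], and .mean() averages that to 2/3.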
@dataclass
class ModelArguments:
    model_name_or_path : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    task_name : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    data_dir : str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def main() -> List[str]:
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        # argmax over the per-choice logits gives the predicted choice index
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" , key , value )
                    writer.write("""%s = %s\n""" % (key, value) )
                results.update(result )
return results
def _mp_fn( index : Optional[Any] ) -> Dict:
    """simple docstring"""
    # entry point for xla_spawn (TPUs); simply forwards to main()
    main()
if __name__ == "__main__":
main() | 156 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None
class FuncNonContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase( unittest.TestCase ):
    '''simple docstring'''
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def lowerCamelCase ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , '''tf''' , 12 , **model_kwargs )
@require_torch
@slow
def lowerCamelCase ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , '''pt''' , 12 , **model_kwargs )
@require_torch
@slow
def lowerCamelCase ( self ):
from transformers import BertModel
        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
            vocab_file.write('''\n'''.join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir , '''pt''' , 12 , tokenizer )
@require_tf
@slow
def lowerCamelCase ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , '''tf''' , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def lowerCamelCase ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , '''pt''' , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('''model.onnx''' )
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework , model , path , opset , tokenizer , **model_kwargs )
                return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def lowerCamelCase ( self ):
from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        tokenizer = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(model , tokenizer , '''pt''' )
@require_tf
@require_tokenizers
@slow
def lowerCamelCase ( self ):
from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        tokenizer = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(model , tokenizer , '''tf''' )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
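        # "batch" and "sequence" above are symbolic dimension names: the export
        # marks dim 0 (batch size) and dim 1 (sequence length) as dynamic, so a
        # single ONNX graph serves any batch size and input length at inference.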
    def lowerCamelCase ( self ):
        valid_input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , valid_input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(valid_input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , valid_input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def lowerCamelCase ( self ):
        generated = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() ) | 707 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def convert_command_factory( args : Namespace ) -> str:
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand ( BaseDatasetsCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand ( parser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=str , required=True , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=str , required=True , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , tfds_path , datasets_directory , *args ):
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F"""Looking at file {f_name}""" )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(input_file , encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        match = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , out_line )
                        tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                        out_line = '''from . import ''' + match.group(1 )
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(F"""Error converting {out_line.strip()}""" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace('''.py''' , '''''' )
                output_dir = os.path.join(abs_datasets_path , dataset_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(F"""Adding directory {output_dir}""" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
            self._logger.info(F"""Converted in {output_file}""" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
def solution( n = 1_0_0_0 ):
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
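# Quick check of the sum above: for n = 10 the terms are
# 6 + 8 + 20 + 24 + 42 + 48 + 72 + 80, so solution(10) == 300.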
if __name__ == "__main__":
print(solution())
| 402 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
snake_case__ : Optional[Any] = logging.get_logger(__name__)
class _A ( MobileViTImageProcessor ):
    '''simple docstring'''
    def __init__( self : Dict , *args : List[Any] , **kwargs : int ):
        '''simple docstring'''
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 402 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ) -> Dict:
    config = FunnelConfig.from_json_file(config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
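# Example invocation (all paths are placeholders, and the script name is simply
# whatever this file is saved as):
#   python convert_funnel_tf_checkpoint.py --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json --pytorch_dump_path ./pytorch_model.bin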
if __name__ == "__main__":
lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
lowerCamelCase :Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 720 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''
def get_all_tweets( screen_name ) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=2_00 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(f"getting tweets before {oldest}" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=2_00 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets )} tweets downloaded so far" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv" , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
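# Note on API behavior (not enforced by this code): Twitter's user_timeline
# endpoint only exposes roughly a user's ~3200 most recent tweets, so the
# while-loop above terminates well before older history on active accounts.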
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 346 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase :int = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[int] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 561 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 1_2_8_0_2_2
FR_CODE = 1_2_8_0_2_8
@require_sentencepiece
class _lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp( self : List[Any] ) -> int:
super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['spm_file'] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self : Optional[int] , **_A : int ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_A )
    def get_input_output_texts( self : Any , _A : str ) -> Optional[int]:
return (
"This is a test",
"This is a test",
)
    def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __lowerCAmelCase ( self : int ) -> Optional[int]:
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '</s>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '<s>' )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def __lowerCAmelCase ( self : Dict ) -> List[str]:
pass
def __lowerCAmelCase ( self : Union[str, Any] ) -> str:
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        text = tokenizer.convert_tokens_to_string(tokens )
        self.assertEqual(text , 'This is a test' )
@slow
def __lowerCAmelCase ( self : Optional[int] ) -> int:
# fmt: off
__magic_name__ : int = {'input_ids': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = """facebook/m2m100_418M"""
    src_text = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
    tgt_text = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
    expected_src_tokens = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
    def setUpClass ( cls : int ) -> List[str]:
        cls.tokenizer : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en' , tgt_lang='fr' )
        cls.pad_token_id : Optional[int] = 1
return cls
def __lowerCAmelCase ( self : List[str] ) -> str:
self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id('en' ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 128063 )
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab['<unk>'] , 3 )
        self.assertIn(self.tokenizer.get_lang_token('en' ) , vocab )
def __lowerCAmelCase ( self : Optional[int] ) -> Any:
        self.tokenizer.src_lang = 'en'
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def __lowerCAmelCase ( self : int ) -> List[str]:
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id )
@require_torch
def __lowerCAmelCase ( self : Dict ) -> List[str]:
        self.tokenizer.src_lang = 'en'
        self.tokenizer.tgt_lang = 'fr'
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt' )
        batch['decoder_input_ids'] = shift_tokens_right(
            batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
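        # The asserts above pin down the M2M100 layout: source ids are
        # "[src_lang_code] X [eos]", labels are "[tgt_lang_code] X [eos]", and
        # decoder_input_ids are the labels shifted right so they start with eos.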
@require_torch
def __lowerCAmelCase ( self : int ) -> Any:
        self.tokenizer.src_lang = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer.src_lang = 'zh'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self : str ) -> List[Any]:
        self.tokenizer.tgt_lang = 'mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        self.tokenizer.tgt_lang = 'zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
        inputs = self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
'input_ids': [[128022, 58, 4183, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 128006,
} , ) | 561 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _SCREAMING_SNAKE_CASE (OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def __snake_case ( self : str , UpperCamelCase : int=0 )->Dict:
        image = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(UpperCamelCase ) )
        generator = np.random.RandomState(UpperCamelCase )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.7_5,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
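        # For img2img, "strength" (0.75 above) sets how far the init image is
        # pushed toward the prompt: values near 0.0 keep the input image almost
        # unchanged, while 1.0 effectively ignores it and denoises from noise.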
def __snake_case ( self : Optional[Any] )->Dict:
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def __snake_case ( self : str )->List[Any]:
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self : List[str] )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# warmup pass to apply optimizations
__SCREAMING_SNAKE_CASE : str = pipe(**self.get_dummy_inputs() )
__SCREAMING_SNAKE_CASE : Dict = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**UpperCamelCase ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self : Union[str, Any] )->Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__SCREAMING_SNAKE_CASE : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE : Tuple = pipe(**UpperCamelCase ).images
__SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self : Dict )->Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__SCREAMING_SNAKE_CASE : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE : Dict = pipe(**UpperCamelCase ).images
__SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self : Optional[int] )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE : Any = pipe(**UpperCamelCase ).images
__SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__SCREAMING_SNAKE_CASE : Optional[Any] = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _SCREAMING_SNAKE_CASE (unittest.TestCase ):
@property
def __snake_case ( self : Optional[int] )->Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __snake_case ( self : List[Any] )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : List[str] = ort.SessionOptions()
__SCREAMING_SNAKE_CASE : List[str] = False
return options
def __snake_case ( self : Optional[int] )->str:
__SCREAMING_SNAKE_CASE : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__SCREAMING_SNAKE_CASE : List[str] = init_image.resize((7_6_8, 5_1_2) )
# using the PNDM scheduler by default
__SCREAMING_SNAKE_CASE : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = "A fantasy landscape, trending on artstation"
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.RandomState(0 )
__SCREAMING_SNAKE_CASE : Dict = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=UpperCamelCase , output_type="np" , )
__SCREAMING_SNAKE_CASE : List[str] = output.images
__SCREAMING_SNAKE_CASE : Tuple = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __snake_case ( self : Union[str, Any] )->Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__SCREAMING_SNAKE_CASE : List[Any] = init_image.resize((7_6_8, 5_1_2) )
__SCREAMING_SNAKE_CASE : Optional[int] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
__SCREAMING_SNAKE_CASE : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__SCREAMING_SNAKE_CASE : str = "A fantasy landscape, trending on artstation"
__SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0 )
__SCREAMING_SNAKE_CASE : str = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=UpperCamelCase , output_type="np" , )
__SCREAMING_SNAKE_CASE : Tuple = output.images
__SCREAMING_SNAKE_CASE : int = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
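# Editorial note (not from the original test suite): each assertion above compares only
# a 3x3 corner slice of the final channel against hard-coded reference values. This
# keeps the fixtures tiny while still catching most numerical regressions, and the
# loose 1e-1 / 2e-2 tolerances absorb run-to-run nondeterminism across ONNX Runtime
# builds.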
| 447 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class OnnxRuntimeGatedTests:
    # Empty placeholder preserved from the source; the decorator skips anything
    # collected here when onnxruntime is not installed.
    pass
| 447 | 1 |
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step: a plain forward pass is enough to let the quantizers
            # observe activation ranges.
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
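# Hypothetical driver sketch (every name below is an assumption, not defined in this
# module): quantization-aware fine-tuning with this trainer calibrates the fake-quant
# ranges on a small sample before evaluation or ONNX export, e.g.:
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       quant_trainer_args=quant_trainer_args,
#   )
#   trainer.calibrate()           # collect activation ranges on ~128 samples
#   metrics = trainer.evaluate()  # regular QA evaluation with calibrated quantizers
#   trainer.save_onnx("./out")    # export the quantized graph (e.g. for TensorRT)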
| 43 |
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 43 | 1 |
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
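# Illustrative before/after (hypothetical entries, not read from any real mapping):
# the sort key is the first quoted identifier on each row, so
#     ("bert", "BertConfig"),
#     ("albert", "AlbertConfig"),
# would be rewritten with the rows alphabetized as
#     ("albert", "AlbertConfig"),
#     ("bert", "BertConfig"),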
| 305 |
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed/parallel containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables for the duration of the context."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None):
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
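# Minimal usage sketch (not part of this module): `patch_environment` is convenient
# in tests that need distributed-style environment variables for a short scope.
#
#   with patch_environment(master_addr="127.0.0.1", master_port="29500"):
#       assert os.environ["MASTER_ADDR"] == "127.0.0.1"
#   # both variables are removed again once the block exits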
| 305 | 1 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
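# Backward-compatibility note: the assignments above write the public classes back
# onto the deprecated modules, so legacy imports such as
# `datasets.utils.DownloadManager` keep resolving even though the canonical home of
# these names has moved.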
| 82 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
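# The value above follows from equating inductive and capacitive reactance:
# f = 1 / (2 * pi * sqrt(L * C)). Worked example with hypothetical component values
# (a 10 mH inductor and a 1 uF capacitor):
#
#   >>> round(resonant_frequency(10e-3, 1e-6)[1], 2)
#   1591.55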
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 1 |
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
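# Interchangeability sketch (assumes a configured diffusers pipeline `pipe`): because
# DEIS, DPM-Solver and UniPC share the same config parameterization, the round-trip in
# test_switch works the same way outside the test suite:
#
#   pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)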
| 713 |
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                # classifier-free guidance: push the prediction away from the
                # unconditional branch by guidance_scale
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
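# Worked example of the latent sizing above (illustrative numbers): with the usual
# movq_scale_factor of 8 and a requested 768x768 output,
# downscale_height_and_width(768, 768, 8) computes 768 // 8**2 = 12 and returns
# 12 * 8 = 96, so the initial latents have spatial shape 96x96 before the MoVQ
# decoder upsamples them back to 768x768.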
| 443 | 0 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 95 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , num_labels=3 , scope=None , encoder_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        """simple docstring"""
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DeiTModel,
            'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
    def test_training( self ):
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"Something is going wrong in the regression problem: intercepted {w.message}" )
                    loss.backward()
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16( self ):
        """simple docstring"""
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.float16 , device_map="auto" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values )
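# Illustrative sketch (not part of the test suite): the minimal inference flow that the
# integration tests above exercise, using the same public checkpoint. The variable
# names here are assumptions for demonstration only.
#
#     processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits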
| 611 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _lowercase ( PipelineTool ):
    default_checkpoint = """microsoft/speecht5_tts"""
    description = (
        """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
        """text to read (in English) and returns a waveform object containing the sound."""
    )
    name = """text_reader"""
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ["""text"""]
    outputs = ["""audio"""]
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors="""pt""" , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
            speaker_embeddings = torch.tensor(embeddings_dataset[7_305]["""xvector"""] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
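# Usage sketch (illustrative): how a PipelineTool such as the class above is typically
# invoked; the input sentence is an arbitrary example, not taken from the original file.
if __name__ == "__main__":
    tool = _lowercase()
    waveform = tool("""Hello, this sentence is read out loud.""" )
    print(waveform.shape )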
| 711 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A : Optional[Any] =logging.get_logger(__name__)
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_A : Tuple ={
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_A : List[Any] ={
'''gpt-neox-20b''': 2_048,
}
class _lowercase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation: "Conversation" ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
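# Usage sketch (illustrative): loading the fast tokenizer defined above from the Hub
# checkpoint declared in the maps at the top of this file, then encoding a sample string.
if __name__ == "__main__":
    tokenizer = _lowercase.from_pretrained("""EleutherAI/gpt-neox-20b""" )
    print(tokenizer("""Hello world""" )["""input_ids"""] )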
| 631 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , do_convert_rgb=True , ):
        '''simple docstring'''
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs( self , equal_resolution=False , numpify=False , torchify=False ):
        '''simple docstring'''
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                width , height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "center_crop" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_convert_rgb" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "center_crop" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_convert_rgb" ) )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil_four_channels( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 189 |
"""simple docstring"""
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 482 | 0 |
import math
def is_prime( __snake_case ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution( __snake_case = 1_0_0_0_1 ) -> int:
    '''simple docstring'''
    try:
        nth = int(__snake_case )
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.' ) from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.' )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
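# Quick sanity checks (illustrative): the first few outputs should match the
# well-known prime sequence 2, 3, 5, 7, 11, 13, ...
if __name__ == "__main__":
    assert solution(1) == 2 and solution(2) == 3 and solution(6) == 13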
| 559 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
    """processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
    """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_layoutlmv2_fast"""] = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_layoutlmv2"""] = ["""LayoutLMv2FeatureExtractor"""]
    _import_structure["""image_processing_layoutlmv2"""] = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_layoutlmv2"""] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__a : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 559 | 1 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 59 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
lowerCamelCase__ : Any = logging.get_logger(__name__)
class _UpperCAmelCase ( SequenceFeatureExtractor ):
    model_input_names = ["""input_features""", """attention_mask"""]
    def __init__( self , feature_size=80 , sampling_rate=1_60_00 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="hamming_window" , frame_signal_scale=32768.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 10_00
        self.sample_stride = hop_length * sampling_rate // 10_00
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features( self , one_waveform ) -> np.ndarray:
        '''simple docstring'''
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel="""log""" , )
        return msfc_features.T
    def _normalize_one( self , x , input_length , padding_value ) -> Any:
        '''simple docstring'''
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self , input_features , attention_mask = None ) -> List[np.ndarray]:
        '''simple docstring'''
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
    def __call__( self , raw_speech , padding = False , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , sampling_rate = None , **kwargs , ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"""input_features""": features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""" )
        if isinstance(input_features[0] , list ):
            padded_inputs["""input_features"""] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            padded_inputs["""attention_mask"""] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["""input_features"""] = self.normalize(
                padded_inputs["""input_features"""] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
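# Usage sketch (illustrative): extracting features from one second of silent audio.
# The keyword values mirror the constructor defaults above; the waveform is synthetic.
if __name__ == "__main__":
    extractor = _UpperCAmelCase(feature_size=80 , sampling_rate=1_60_00 , padding_value=0.0 )
    batch = extractor(np.zeros(1_60_00 , dtype=np.float32 ) , sampling_rate=1_60_00 , return_tensors="""np""" )
    print(batch["""input_features"""].shape )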
| 238 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : Optional[int] = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowerCamelCase_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "fnet"
    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=7_6_8 , num_hidden_layers=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=4 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=5_1_2 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
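# Usage sketch (illustrative): instantiating the config with a couple of overrides,
# mirroring how `PretrainedConfig` subclasses are normally constructed.
if __name__ == "__main__":
    config = lowerCamelCase_(vocab_size=3_2_0_0_0 , hidden_size=7_6_8 )
    print(config.model_type , config.hidden_size )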
| 712 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects ):
    '''simple docstring'''
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size( exception ):
    '''simple docstring'''
    _statements = [
        '''CUDA out of memory.''', # CUDA OOM
        '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
        '''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def find_executable_batch_size( function = None , starting_batch_size = 1_28 ):
    '''simple docstring'''
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args , **kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ''', '''.join([F'{arg}={value}' for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                F'Batch size was passed into `{function.__name__}` as the first argument when called.'
                F'Remove this as the decorator already does so: `{function.__name__}({arg_str})`' )
        while True:
            if batch_size == 0:
                raise RuntimeError('''No executable batch size found, reached zero.''' )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
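# Usage sketch (illustrative): decorating a training entry point so its batch size is
# halved whenever an out-of-memory error is raised. The training body is a placeholder.
if __name__ == "__main__":
    @find_executable_batch_size(starting_batch_size=1_28 )
    def train(batch_size ):
        print(F'training with batch_size={batch_size}' )
    train()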
| 527 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> Dict:
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp( self ) -> Any:
        self.image_processor_tester = DPTImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Dict:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> Dict:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def test_call_pil( self ) -> List[Any]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
    def test_call_numpy( self ) -> int:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
    def test_call_pytorch( self ) -> Any:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 624 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    """simple docstring"""
    config = DPTConfig(embedding_type="hybrid" )
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id ,filename ,repo_type="dataset" ) ) ,"r" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    """simple docstring"""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def rename_key( name ) -> str:
    """simple docstring"""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model" ,"dpt.encoder" )
    if "pretrained.model" in name:
        name = name.replace("pretrained.model" ,"dpt.embeddings" )
    if "patch_embed" in name:
        name = name.replace("patch_embed" ,"" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" ,"position_embeddings" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" ,"attention.output.dense" )
    if "proj" in name and "project" not in name:
        name = name.replace("proj" ,"projection" )
    if "blocks" in name:
        name = name.replace("blocks" ,"layer" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" ,"intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" ,"output.dense" )
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1" ,"layernorm_before" )
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2" ,"layernorm_after" )
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv" ,"head" )
    if "scratch" in name:
        name = name.replace("scratch" ,"neck" )
    if "layer1_rn" in name:
        name = name.replace("layer1_rn" ,"convs.0" )
    if "layer2_rn" in name:
        name = name.replace("layer2_rn" ,"convs.1" )
    if "layer3_rn" in name:
        name = name.replace("layer3_rn" ,"convs.2" )
    if "layer4_rn" in name:
        name = name.replace("layer4_rn" ,"convs.3" )
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" ,F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace("out_conv" ,"projection" )
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1" ,"residual_layer1" )
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2" ,"residual_layer2" )
    if "conv1" in name:
        name = name.replace("conv1" ,"convolution1" )
    if "conv2" in name:
        name = name.replace("conv2" ,"convolution2" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0" ,"neck.reassemble_stage.readout_projects.0.0" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0" ,"neck.reassemble_stage.readout_projects.1.0" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0" ,"neck.reassemble_stage.readout_projects.2.0" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0" ,"neck.reassemble_stage.readout_projects.3.0" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3" ,"neck.reassemble_stage.layers.0.projection" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4" ,"neck.reassemble_stage.layers.0.resize" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3" ,"neck.reassemble_stage.layers.1.projection" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4" ,"neck.reassemble_stage.layers.1.resize" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3" ,"neck.reassemble_stage.layers.2.projection" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3" ,"neck.reassemble_stage.layers.3.projection" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4" ,"neck.reassemble_stage.layers.3.resize" )
    if "pretrained" in name:
        name = name.replace("pretrained" ,"dpt" )
    if "bn" in name:
        name = name.replace("bn" ,"batch_norm" )
    if "head" in name:
        name = name.replace("head" ,"head.head" )
    if "encoder.norm" in name:
        name = name.replace("encoder.norm" ,"layernorm" )
    if "auxlayer" in name:
        name = name.replace("auxlayer" ,"auxiliary_head.head" )
    if "backbone" in name:
        name = name.replace("backbone" ,"backbone.bit.encoder" )
    if ".." in name:
        name = name.replace(".." ,"." )
    if "stem.conv" in name:
        name = name.replace("stem.conv" ,"bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks" ,"layers" )
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution" ,"conv" )
    if "layer" in name and "backbone" in name:
        name = name.replace("layer" ,"layers" )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit" ,"backbone.bit" )
    if "embedder.conv" in name:
        name = name.replace("embedder.conv" ,"embedder.convolution" )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm" ,"backbone.bit.embedder.norm" )
    return name
def read_in_q_k_v( state_dict ,config ) -> Dict:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url ,pytorch_dump_folder_path ,push_to_hub ,model_name ,show_prediction ) -> Union[str, Any]:
    """simple docstring"""
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url ,map_location="cpu" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict ,config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if "ade" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image ,return_tensors="pt" )
    # forward pass
    outputs = model(**encoding ).logits if "ade" in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) ,size=(image.size[1], image.size[0]) ,mode="bicubic" ,align_corners=False ,)
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas" )
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
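# Example invocation (a sketch; the script filename is hypothetical, and the checkpoint
# must already be downloaded locally since the script calls torch.load on the path):
#
#     python convert_dpt_checkpoint.py \
#         --checkpoint_url ./dpt_large-midas-2f21e586.pt \
#         --pytorch_dump_folder_path ./dpt-large \
#         --show_prediction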
| 624 | 1 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and always restores it."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
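# Minimal usage sketch: wrap any terminal-drawing code so the cursor is restored
# even if an exception is raised inside the block (the caller name is illustrative):
#
#     with hide():
#         render_progress_display()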
| 222 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to fp16, overwriting `src_path` unless `save_path` is given."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
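# Example (the script filename is hypothetical; fire maps CLI arguments onto `convert`):
#
#     python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin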
| 222 | 1 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        # two linear probes on the CLIP projection embeddings: NSFW and watermark detection
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
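# Minimal usage sketch (illustrative only: `clip_input` is the feature-extractor output
# for a batch of images, `images` the matching numpy pixel batch):
#
#     checker = IFSafetyChecker(CLIPConfig())
#     images, nsfw_flags, watermark_flags = checker(clip_input, images)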
| 58 |
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 190 | 0 |
from PIL import Image
def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Change the brightness of a PIL image by a given level."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
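# Note that 128 + level + (c - 128) simplifies to c + level, i.e. a uniform shift of
# every channel value; PIL's Image.point clamps the result to the 0..255 range.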
| 511 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    """Constructs a video image processor that resizes, crops, rescales and normalizes frames."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
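# Minimal usage sketch (argument values are illustrative; frames are PIL images or arrays):
#
#     processor = VivitImageProcessor()
#     batch = processor(list_of_videos, return_tensors="pt")
#     # batch["pixel_values"] has shape (num_videos, num_frames, 3, 224, 224)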
| 511 | 1 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """Zero shot audio classification pipeline using CLAP-style audio-text checkpoints."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
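# Minimal usage sketch (the checkpoint name is an assumption: any CLAP-style
# audio-text model exposing logits_per_audio should work):
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])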
| 514 |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data to the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit variance (z-score standardization)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
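# Worked example: normalization([2, 7, 10, 20, 30, 50]) uses x_min=2 and x_max=50,
# so each value maps to (x - 2) / 48, giving [0.0, 0.104, 0.167, 0.375, 0.583, 1.0].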
| 514 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)

        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)

        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 712 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        # NOTE: the original boolean values for `is_decoder` and `new_decoder_architecture`
        # were lost in this copy; True is an assumption for both.
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_cache_conversion(self):
        # NOTE: the original test name was lost in this copy; the body checks the round trip
        # between Falcon's RW cache layout and the standard transformers cache format.
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)

        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than query heads, so this override
        # uses the right head counts when checking the cache shapes.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 176 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
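# As the script shows, the only incompatibility handled here is the tied LM head key:
# the original DialoGPT checkpoints store it as lm_head.decoder.weight, while the
# transformers GPT-2 format expects lm_head.weight, hence the single rename.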
| 603 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE: the original attribute names for the two flags below were lost in this copy;
    # only their values (False and True) survive, so the names are assumptions.
    test_pruning = False
    test_resize_embeddings = True
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = MPNetModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*_UpperCAmelCase )
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
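# To run just this test file with pytest (illustrative; the path assumes the
# usual transformers test layout):
#   python -m pytest tests/models/mpnet/test_modeling_mpnet.py -k "MPNetModelTest"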
| 603 | 1 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
    import torch

if is_vision_available():
    import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
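# Worked example for the resizing logic above (illustrative values): a 480x640
# image resized toward 384x384 with keep_aspect_ratio=True "fits height"
# (the chosen scale is 0.8, since |1 - 0.8| < |1 - 0.6|) and snaps both sides
# to multiples of 32, yielding (384, 512):
#
#   dummy = np.zeros((3, 480, 640), dtype=np.float32)
#   get_resize_output_image_size(dummy, (384, 384), keep_aspect_ratio=True, multiple=32)
#   # -> (384, 512)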
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
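# Minimal usage sketch for the processor above (illustrative; assumes PIL and
# torch are installed):
#   processor = DPTImageProcessor(keep_aspect_ratio=True, ensure_multiple_of=32)
#   batch = processor(images=PIL.Image.open("image.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, H, W) with H and W multiples of 32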
| 717 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
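    # Example invocations from a shell (illustrative; the JSON config path is
    # hypothetical):
    #   transformers-cli add-new-model
    #   transformers-cli add-new-model --testing --testing_file ./add_new_model_config.json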
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
| 22 | 0 |
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: given two known values among voltage, current and
    resistance (pass the unknown one as 0), return the missing quantity.

    >>> ohms_law(voltage=10, current=2, resistance=0)
    {'resistance': 5.0}
    >>> ohms_law(voltage=0, current=2, resistance=3)
    {'voltage': 6.0}
    >>> ohms_law(voltage=12, current=0, resistance=4)
    {'current': 3.0}
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 427 |
import math

from numpy import inf
from scipy.integrate import quad


def gamma_function(num: float) -> float:
    """Numerically evaluate the gamma function:
    Gamma(num) = integral from 0 to infinity of x**(num - 1) * exp(-x) dx.

    >>> from math import isclose
    >>> isclose(gamma_function(5), 24.0, rel_tol=1e-6)  # Gamma(5) = 4! = 24
    True
    """
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
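# Sanity checks (illustrative): Gamma(n) = (n - 1)! for positive integers, so
# gamma_function(4) should be close to 6.0 and gamma_function(6) close to 120.0.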
if __name__ == "__main__":
from doctest import testmod
testmod()
| 563 | 0 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    # The usual fairseq-conversion helper: build an LM head that reuses the
    # embedding matrix as its weight.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
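# Example of the renaming above (illustrative key, expert_idx=7):
#   "decoder.layers.3.moe_layer.experts.0.fc1.weight"
#     -> "decoder.layers.3.ffn.experts.expert_7.fc1.weight"
# (the fc1/fc2 rewrites are skipped here because the key contains "experts")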
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
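# The index written above follows the standard sharded-checkpoint layout
# (illustrative contents):
#   {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {
#       "decoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin",
#       ...
#     }
#   }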
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ , lowerCAmelCase__ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
lowerCAmelCase__ = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowerCAmelCase__ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 717 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
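# Quick usage sketch (illustrative):
#   config = RwkvConfig(hidden_size=768, num_hidden_layers=12)
#   config.attention_hidden_size    # defaults to hidden_size -> 768
#   config.max_position_embeddings  # mapped onto context_length -> 1024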
| 648 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
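    # With the defaults above, prepare_config_and_inputs() below produces
    # (illustrative shapes): pixel_values (2, 3, 128, 192), pixel_mask (2, 128, 192),
    # mask_labels (2, 4, 128, 192) and class_labels (2, 4).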
def __lowercase( self ) -> Optional[int]:
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_SCREAMING_SNAKE_CASE )
__UpperCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE ) > 0.5
).float()
__UpperCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=_SCREAMING_SNAKE_CASE ) > 0.5).long()
__UpperCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase( self ) -> Dict:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __lowercase( self ) -> str:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
__UpperCamelCase = output.encoder_hidden_states
__UpperCamelCase = output.pixel_decoder_hidden_states
__UpperCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , config.decoder_config.decoder_layers )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
with torch.no_grad():
__UpperCamelCase = MaskFormerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__UpperCamelCase = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
__UpperCamelCase = MaskFormerForInstanceSegmentation(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
def comm_check_on_output(_SCREAMING_SNAKE_CASE ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__UpperCamelCase = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = model(
pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def __lowercase( self ) -> str:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def __lowercase( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def __lowercase( self ) -> str:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def __lowercase( self ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowercase( self ) -> Any:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase( self ) -> List[str]:
pass
def __lowercase( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@slow
def __lowercase( self ) -> int:
for model_name in ["facebook/maskformer-swin-small-coco"]:
__UpperCamelCase = MaskFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowercase( self ) -> List[str]:
__UpperCamelCase = (self.model_tester.min_size,) * 2
__UpperCamelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=_SCREAMING_SNAKE_CASE ),
'mask_labels': torch.randn((2, 10, *size) , device=_SCREAMING_SNAKE_CASE ),
'class_labels': torch.zeros(2 , 10 , device=_SCREAMING_SNAKE_CASE ).long(),
}
__UpperCamelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
def __lowercase( self ) -> Dict:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
def __lowercase( self ) -> Tuple:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = model(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.attentions is not None )
def __lowercase( self ) -> Optional[int]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__UpperCamelCase = self.all_model_classes[1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
__UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __lowercase( self ) -> Optional[int]:
# only MaskFormerForInstanceSegmentation has the loss
__UpperCamelCase = self.all_model_classes[1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
__UpperCamelCase = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__UpperCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
__UpperCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__UpperCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )
def __lowercase( self ) -> Tuple:
__UpperCamelCase = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088) )
with torch.no_grad():
__UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
__UpperCamelCase = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
__UpperCamelCase = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
__UpperCamelCase = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def __lowercase( self ) -> Dict:
__UpperCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_SCREAMING_SNAKE_CASE )
.eval()
)
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088) )
with torch.no_grad():
__UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
# masks_queries_logits
__UpperCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__UpperCamelCase = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
__UpperCamelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
# class_queries_logits
__UpperCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__UpperCamelCase = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def __lowercase( self ) -> Union[str, Any]:
__UpperCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(_SCREAMING_SNAKE_CASE )
.eval()
)
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088) )
with torch.no_grad():
__UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
# masks_queries_logits
__UpperCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__UpperCamelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
__UpperCamelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
# class_queries_logits
__UpperCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__UpperCamelCase = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def __lowercase( self ) -> str:
__UpperCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_SCREAMING_SNAKE_CASE )
.eval()
)
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
__UpperCamelCase = inputs['pixel_values'].to(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs['mask_labels']]
__UpperCamelCase = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs['class_labels']]
with torch.no_grad():
__UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
| 383 |
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder

MODEL = "base_with_context"
def load_notes_encoder(weights, model):
"""simple docstring"""
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCamelCase = weights[F"""layers_{lyr_num}"""]
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__UpperCamelCase = ly_weight['attention']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights, model):
"""simple docstring"""
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCamelCase = weights[F"""layers_{lyr_num}"""]
__UpperCamelCase = ly_weight['attention']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights, model):
"""simple docstring"""
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowercase )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__UpperCamelCase = weights[F"""layers_{lyr_num}"""]
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
__UpperCamelCase = ly_weight['self_attention']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase = ly_weight['MultiHeadDotProductAttention_0']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
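# Example invocation (illustrative; the checkpoint directory is hypothetical):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path ./base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion_pipeline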
| 383 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the test data."}
    )

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Converts the '#'-delimited table text of TabFact into a pandas DataFrame."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd
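        # For illustration (made-up values): a TabFact-style `table_text` such as
        #   "name#age\nalice#30\nbob#25\n"
        # becomes a DataFrame with columns ["name", "age"] and two rows --
        # the first '#'-split row supplies the column headers.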
        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 709 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
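# Rough intuition (not part of the original example): with LocalSGD each
# worker takes `local_sgd_steps` optimizer steps on its own data shard before
# the workers synchronize, at which point every parameter p is replaced by
# the average of the workers' copies, p <- mean(p_1, ..., p_W). This trades a
# little statistical efficiency for far fewer all-reduce communications.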
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 111 | 0 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!") | 67 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of the Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
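# For a Hermitian matrix M and nonzero vector v, the Rayleigh quotient
#   R(M, v) = (v^H M v) / (v^H v)
# is always real, and it is bounded by the smallest and largest eigenvalues
# of M (it attains them when v is the corresponding eigenvector).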
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests() | 67 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias'''))
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
])
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
])
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
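# Note on the slicing above: timm stores the attention projections fused as a
# single `qkv` matrix of shape (3 * hidden_size, hidden_size); rows [0:h] hold
# the query projection, [h:2h] the key, and [2h:3h] the value, which is why
# the weight is split into three row blocks (and the bias likewise).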
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
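# Example invocation (assuming this file is saved as convert_deit_timm_to_pytorch.py;
# the model name below is the argparse default shown further down, and any DeiT
# timm identifier works):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled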
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 709 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False
    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors
    def retrace_path(self, node: Node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
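        # The backward search retraces from the meeting node toward `goal`, so
        # its path starts at the goal and ends at the meeting node; we drop the
        # duplicated meeting node and reverse it so that concatenating
        # fwd_path + bwd_path reads start -> ... -> meeting node -> ... -> goal.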
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 444 | 0 |
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    """Applies the rectified linear unit max(0, x) elementwise."""
    return np.maximum(0, vector)
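# Quick check of the elementwise behaviour: negative entries are clamped to 0
# while non-negative entries pass through unchanged, e.g.
#   relu([-1, 0, 5]) -> array([0, 0, 5])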
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 569 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32, )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 569 | 1 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv layer: Flax kernels are (H, W, in, out); PyTorch expects (out, in, H, W)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # dense layer: Flax kernels are (in, out); PyTorch Linear weights are (out, in)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
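# Minimal usage sketch (names are illustrative, not from this file): given a
# PyTorch model instance and a serialized Flax checkpoint, something like
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")
# returns the same model with its state_dict populated from the Flax weights.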
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
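# Note: each backend-specific symbol list above is only registered in
# `_import_structure` when the corresponding dependency check passes, and the
# `_LazyModule` installed at the bottom of this file resolves those names on
# first attribute access, so importing this package stays cheap.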
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 117 | 0 |
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    """
    Computes the prefix function (as used by Knuth-Morris-Pratt): for every
    index i, the length of the longest proper prefix of input_string[: i + 1]
    that is also a suffix of it.
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result
def longest_prefix(input_string: str) -> int:
    """Returns the longest value of the prefix function over the whole string."""
    return max(prefix_function(input_string))
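# Worked example: for "aabaaab" the prefix function is [0, 1, 0, 1, 2, 2, 3];
# at the final index the longest proper prefix that is also a suffix is "aab"
# (length 3), so longest_prefix("aabaaab") == 3.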
if __name__ == "__main__":
import doctest
doctest.testmod()
| 528 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets."
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard."
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging)."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
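    # Chunking arithmetic, with made-up numbers: if the concatenated batch has
    # 1050 tokens and max_length is 512, total_length is floored to 1024 and
    # the comprehension yields two chunks of exactly 512 tokens; the trailing
    # 26 tokens are dropped.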
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 528 | 1 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def _UpperCamelCase ( _a : Optional[int] ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = defaultdict(lowerCAmelCase__ )
__UpperCamelCase : List[Any] = []
__UpperCamelCase : Optional[int] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(lowerCAmelCase__ )
__UpperCamelCase : Any = new_doc_list
__UpperCamelCase : Optional[Any] = [key for key, value in counts.items() if value > 1]
__UpperCamelCase : List[str] = []
for duplicate_key in duplicates:
__UpperCamelCase : Union[str, Any] = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(lowerCAmelCase__ ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
__UpperCamelCase : int = sorted(lowerCAmelCase__ , key=lambda _a : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowerCAmelCase__ ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(lowerCAmelCase__ )
# Sort
return overview_doc
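# A small worked example (added for clarity, not part of the original script):
#     clean_doc_toc([
#         {"local": "overview", "title": "Overview"},
#         {"local": "zebra", "title": "Zebra"},
#         {"local": "ant", "title": "Ant"},
#         {"local": "ant", "title": "Ant"},
#     ])
# keeps the "Overview" entry first, deduplicates the repeated "ant" entry, and
# sorts the rest by title, returning Overview, Ant, Zebra.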
def check_scheduler_doc(overwrite=False):
    """Checks (and optionally fixes) the ordering of the Schedulers section of the TOC."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    """Checks (and optionally fixes) the ordering of the Pipelines section of the TOC."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
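    # To spell out the expected values above (comment added for clarity): "lower"
    # is split into characters "l o w e r</w>"; applying the merges "l o" -> "lo",
    # "lo w" -> "low" and "e r</w>" -> "er</w>" leaves ["low", "er</w>"], which map
    # to vocabulary ids 14 and 15, with "<unk>" at id 20.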
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Runs the same tests with the Spacy/ftfy tokenization backend."""

    pass
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0))
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = 10, 0.0
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for t in scheduler.timesteps:
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
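    # Note (comment added for clarity): full_loop runs a complete 10-step DDIM
    # sampling loop with the deterministic dummy model and sample provided by
    # SchedulerCommonTest, so the full-loop tests below can assert fixed sums
    # and means of the output.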
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
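        # Spelling out the expected timesteps (comment added for clarity): with
        # num_train_timesteps=1000 and 5 inference steps, the step ratio is
        # 1000 // 5 = 200, giving timesteps [800, 600, 400, 200, 0]; steps_offset=1
        # shifts these to [801, 601, 401, 201, 1].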
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta_start so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta_start so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets into a single dataset, alternating between sources example by example (optionally with sampling probabilities)."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
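# A hypothetical usage sketch (not part of this module; the dataset names are
# only illustrative):
#     from datasets import load_dataset, interleave_datasets
#     d1 = load_dataset("c4", "en", split="train", streaming=True)
#     d2 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)
#     mixed = interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42)
# With the default "first_exhausted" strategy, iteration stops as soon as one
# source runs out of examples; "all_exhausted" keeps going (oversampling) until
# every source has been fully seen.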
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets into a single dataset, over rows (axis=0) or over columns (axis=1)."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                tokens_s = tokenizer_s.tokenize(text)
                tokens_r = tokenizer_r.tokenize(text)

                self.assertListEqual(tokens_s, tokens_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                tokens_s = tokenizer_s.tokenize(text)
                tokens_r = tokenizer_r.tokenize(text)

                self.assertListEqual(tokens_s, tokens_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq)
                    tokens_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(tokens_s, tokens_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).

                for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq)
                    tokens_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(tokens_s, tokens_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
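                # Spelling out the offset arithmetic above (comment added for
                # clarity): with two identical tokens separated by one space,
                # token 0 spans [0, len) and token 1 spans [len + 1, 2 * len + 1);
                # a leading space shifts both spans right by one character.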
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
# Lint as: python3
"""Mock download manager interface."""

import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )

        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )

        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)
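    # Illustration (comment added for clarity, URL is hypothetical): a dataset
    # script that calls download_and_extract({"train": "https://host/path/train.csv"})
    # on this mock gets back {"train": os.path.join(dummy_file, "train.csv")},
    # i.e. every URL is remapped to a same-named file inside the dummy data folder.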
    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads the dataset metadata from the YAML block of a dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads the dataset metadata from a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# Known task categories on the Hub (sub-task lists left empty here).
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
import os
import pickle
import unittest

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi

from ...test_tokenization_common import TokenizerTesterMixin


@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
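        # To make the expected ids above concrete (comment added for clarity):
        # they index into the vocab written in setUp, e.g. "こんにちは" -> 3,
        # "、" -> 12, "世界" -> 10, "。" -> 14 and the continuation piece
        # "##ばんは" -> 9.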
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        # The boolean values below are inferred from the expected output ("iPhone" kept
        # as-is, ideographic space preserved); the original flags were obfuscated.
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "\u3000", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
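# Clarifying note (added, not from the original test): the model returns the mean
# cross-entropy over the labels.shape[-1] target tokens, so multiplying by that
# length and negating gives the total sequence log-likelihood. The name mtf_score
# suggests the expected value was computed with the original Mesh TensorFlow T5
# implementation, but that provenance is an assumption.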
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=True, initializer_range=self.initializer_range,
        )
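    # BioGPT is a decoder-only causal LM, hence is_decoder=True in the test config.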
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
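    # Rationale: feeding only the new token together with past_key_values must reproduce the
    # last position of a full forward pass; comparing a random slice of both outputs is a
    # cheap equivalence check for the KV cache.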
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
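    # Background: like GPT-2, BioGPT scales the residual output projections at init by
    # 1/sqrt(2 * num_layers); model_std above encodes exactly that expectation.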
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized, min_length=100, max_length=1024, num_beams=5, early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 675 | 0 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
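# Example of the mapping above: the original key "blocks.0.attn.proj.weight" is rewritten
# step by step to "videomae.encoder.layer.0.attention.output.dense.weight".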
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
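# Background: the original checkpoint stores query/key/value as one fused "qkv" matrix of
# shape (3 * dim, dim); slicing it into thirds yields the separate q/k/v projections that
# the Hugging Face implementation expects.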
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowercase = torch.Size([1, 4_0_0] )
__lowercase = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowercase = torch.Size([1, 1_7_4] )
__lowercase = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
__lowercase = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__lowercase = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
__lowercase = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__lowercase = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowercase = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
__lowercase = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__lowercase = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowercase = torch.Size([1, 4_0_0] )
__lowercase = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowercase = torch.Size([1, 4_0_0] )
__lowercase = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowercase = torch.Size([1, 4_0_0] )
__lowercase = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowercase = torch.Size([1, 4_0_0] )
__lowercase = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
__lowercase = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__lowercase = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowercase = torch.Size([1, 1_7_4] )
__lowercase = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
__lowercase = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__lowercase = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowercase = torch.Size([1, 1_7_4] )
__lowercase = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__lowercase = outputs.loss
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 402 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
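# How the lazy pattern behaves (illustration): attribute access such as
# transformers.models.data2vec.Data2VecAudioModel goes through _LazyModule.__getattr__,
# which imports .modeling_data2vec_audio on first use rather than at package import time.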
| 80 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
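    # As the expected values show, tokenizer.pad extends "global_attention_mask" with -1
    # at padding positions, marking them as carrying no global attention.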
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 705 |
'''simple docstring'''
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Convert volume between units, using cubic metres as the pivot.

    >>> volume_conversion(4, "cubicmeter", "litre")
    4000
    >>> volume_conversion(1, "litre", "gallon")
    0.264172
    >>> volume_conversion(1, "kilolitre", "cubicmeter")
    1
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
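# Worked example: 2 litres to US gallons is
# 2 * 0.001 (litre -> cubic metre) * 264.172 (cubic metre -> gallon) ~= 0.528 gallons.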
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 | 0 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transform a snake_case string to camelCase (or PascalCase when use_pascal is True).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 348 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
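# Design note: BetterTransformer swaps modules for fused kernels in place, so a checkpoint
# saved in that state would not load back into the stock architecture; the error guards
# against this until reverse_bettertransformer() restores the original layout.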
| 348 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
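# For context: CaptureLogger attaches a temporary handler to the given logger for the
# duration of the `with` block, so `cl.out` holds exactly what that logger emitted.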
| 389 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
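# Added for convenience (not in the original file): lets this test module be
# run directly with `python <this_file>.py`; unittest discovers the TestCase
# classes defined above.
if __name__ == "__main__":
    unittest.main()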
| 15 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
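# Worked example (an addition, not part of the original tests): the helper
# strips the URI scheme from remote paths and leaves local paths untouched,
# which is exactly what the two tests above assert.
def example_extract_path_from_uri():
    assert extract_path_from_uri("s3://my-bucket/data") == "my-bucket/data"
    assert extract_path_from_uri("./local/path") == "./local/path"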
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
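# Standalone sketch of what the parametrized test above exercises (an addition;
# the path below is hypothetical). Each compression filesystem registered by
# `datasets.filesystems` (imported at the top of this module) wraps a single
# compressed file and exposes its decompressed content as the only root member.
def example_read_gzip_with_fsspec(gz_path="some_file.txt.gz"):
    fs = fsspec.filesystem("gzip", fo=gz_path)
    [member] = fs.glob("*")
    with fs.open(member, "r", encoding="utf-8") as f:
        return f.read()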
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 598 | 0 |
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
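# Small worked example (an addition for illustration): in this 4x4 grid the
# best run of four adjacent numbers is the main "\" diagonal, 1 * 2 * 3 * 4 = 24.
def example_largest_product():
    grid = [
        [1, 0, 0, 0],
        [0, 2, 0, 0],
        [0, 0, 3, 0],
        [0, 0, 0, 4],
    ]
    assert largest_product(grid) == 24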
if __name__ == "__main__":
print(solution())
| 437 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(self, hidden_size=1_408, intermediate_size=6_144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00001, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1_408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
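# Usage sketch (an addition for illustration): a Blip2Config can be composed
# from its three sub-configs; when no text config is given the language model
# defaults to OPT.
def example_compose_blip2_config():
    vision_config = Blip2VisionConfig()
    qformer_config = Blip2QFormerConfig()
    text_config = CONFIG_MAPPING["opt"]()
    config = Blip2Config.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
    return config.to_dict()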
| 437 | 1 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
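# Illustrative mini-example (an addition, not part of the original script): the
# loop above keeps prediction-layer weights at the top level and nests every
# other weight under "transformer." so the checkpoint matches transformers'
# XLM model layout.
def _example_remap():
    state_dict = {"pred_layer.proj.weight": 1, "embeddings.weight": 2}
    remapped = {(k if "pred_layer" in k else "transformer." + k): v for k, v in state_dict.items()}
    assert remapped == {"pred_layer.proj.weight": 1, "transformer.embeddings.weight": 2}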
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case : Any = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 53 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 53 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
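# Small example (an addition for clarity): floats_list builds a nested Python
# list of random floats with the requested 2-D shape, drawing from the
# module-level RNG by default.
def example_floats_list():
    values = floats_list((2, 3), scale=0.5)
    assert len(values) == 2 and len(values[0]) == 3
    assert all(0.0 <= v < 0.5 for v in values[0])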
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2_000, feature_size=1, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1_024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
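# Standalone usage sketch (an addition, mirroring what the tests above check):
# the AST feature extractor turns a raw 16 kHz waveform into a padded/truncated
# (batch, 1024, 128) log-mel spectrogram. Needs torch installed for "pt" tensors.
def example_extract_features():
    feature_extractor = ASTFeatureExtractor()
    waveform = floats_list((1, 16_000))[0]
    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
    return inputs.input_values.shape  # expected: torch.Size([1, 1024, 128])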
| 715 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
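# Worked example (an addition for clarity): for the symbol tuple of "low" with
# the end-of-word marker appended, every adjacent pair is returned exactly once.
def example_get_pairs():
    word = ("l", "o", "w</w>")
    assert get_pairs(word) == {("l", "o"), ("o", "w</w>")}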
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)

        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
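# Usage sketch (an addition; the vocab/merges paths are placeholders -- real
# checkpoints such as facebook/blenderbot_small-90M ship these files).
def example_roundtrip(vocab_file="vocab.json", merges_file="merges.txt"):
    tokenizer = BlenderbotSmallTokenizer(vocab_file, merges_file)
    ids = tokenizer("sam is a great name")["input_ids"]
    return tokenizer.decode(ids)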
| 150 | 0 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
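# Usage sketch (an addition; the checkpoint name is illustrative): this class
# backs the "feature-extraction" task of `transformers.pipeline`, returning
# nested lists of floats shaped [1, sequence_length, hidden_size] per input.
def example_feature_extraction(text="This is a test."):
    from transformers import pipeline  # local import: pulls in a full model

    extractor = pipeline(task="feature-extraction", model="distilbert-base-uncased")
    return extractor(text)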
| 599 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 69 | 0 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 390 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
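# Usage sketch (an addition for illustration): UperNet can be paired with any
# backbone config from CONFIG_MAPPING that exposes `out_features`, e.g. Swin.
def example_upernet_with_swin():
    backbone_config = CONFIG_MAPPING["swin"](out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(backbone_config=backbone_config)
    return config.to_dict()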
| 390 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCAmelCase_ = TypeVar("T")
UpperCAmelCase_ = Union[List[T], Tuple[T, ...]]
UpperCAmelCase_ = Union[T, List[T], Dict[str, T]]
UpperCAmelCase_ = Union[str, bytes, os.PathLike] | 32 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 306 | 0 |
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implements the hyperbolic tangent via the closed form (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
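# Cross-check (an addition for illustration): the closed form above should
# agree with NumPy's built-in tanh to floating-point tolerance.
def example_matches_numpy():
    sample = np.array([1.0, 5.0, 6.0, -0.67])
    assert np.allclose(tangent_hyperbolic(sample), np.tanh(sample))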
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config)

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = ViTHybridModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def __snake_case ( self ):
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Any = [*signature.parameters.keys()]
UpperCAmelCase__ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : str = _config_zero_init(UpperCamelCase_ )
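        # Added note: _config_zero_init returns a copy of the config with its
        # initializer ranges scaled down to ~0, so freshly initialized weights are
        # expected to land on exactly 0.0 or 1.0; anything else flags a module whose
        # init ignores the config (the backbone is exempted just below).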
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(config=UpperCamelCase_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase__ : List[Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def __snake_case ( self ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Dict = ViTHybridModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCamelCase ( ):
UpperCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def __snake_case ( self ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __snake_case ( self ):
UpperCAmelCase__ : Dict = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCamelCase_ )
UpperCAmelCase__ : Any = self.default_image_processor
UpperCAmelCase__ : Optional[Any] = prepare_img()
UpperCAmelCase__ : Optional[int] = image_processor(images=UpperCamelCase_ , return_tensors='pt' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Any = model(**UpperCamelCase_ )
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
UpperCAmelCase__ : str = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
@require_accelerate
def __snake_case ( self ):
UpperCAmelCase__ : str = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
UpperCAmelCase__ : Union[str, Any] = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=UpperCamelCase_ , return_tensors='pt' )
UpperCAmelCase__ : List[Any] = model(**UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase__ : List[str] = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
| 254 | 0 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('''Enter image url: ''').strip()
    print(f'''Downloading image from {url} ...''')
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''')
| 14 |
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=DummyObject ):
a : int = ["""torch""", """scipy"""]
def __init__( self , *A , **A ) -> str:
'''simple docstring'''
requires_backends(self , ["torch", "scipy"] )
@classmethod
def lowerCAmelCase_ ( cls , *A , **A ) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def lowerCAmelCase_ ( cls , *A , **A ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"] )
| 515 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : Union[str, Any] = logging.get_logger(__name__)
__a : Tuple = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class BridgeTowerVisionConfig ( PretrainedConfig ):
"""simple docstring"""
__a : Any = '''bridgetower_vision_model'''
def __init__( self , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=2_88 , lowerCAmelCase__=1 , lowerCAmelCase__=1E-05 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> int:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_channels
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_factor
__lowercase = layer_norm_eps
__lowercase = stop_gradient
__lowercase = share_layernorm
__lowercase = remove_last_layer
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> "PretrainedConfig":
'''simple docstring'''
__lowercase , __lowercase = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
if config_dict.get('''model_type''' ) == "bridgetower":
            __lowercase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class BridgeTowerTextConfig ( PretrainedConfig ):
"""simple docstring"""
__a : Tuple = '''bridgetower_text_model'''
def __init__( self , lowerCAmelCase__=5_02_65 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=1 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_14 , lowerCAmelCase__=1 , lowerCAmelCase__=1E-05 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = initializer_factor
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = pad_token_id
__lowercase = bos_token_id
__lowercase = eos_token_id
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> "PretrainedConfig":
'''simple docstring'''
__lowercase , __lowercase = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
if config_dict.get('''model_type''' ) == "bridgetower":
__lowercase = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class BridgeTowerConfig ( PretrainedConfig ):
"""simple docstring"""
__a : Tuple = '''bridgetower'''
def __init__( self , lowerCAmelCase__=True , lowerCAmelCase__="gelu" , lowerCAmelCase__=7_68 , lowerCAmelCase__=1 , lowerCAmelCase__=1E-05 , lowerCAmelCase__=False , lowerCAmelCase__="add" , lowerCAmelCase__=12 , lowerCAmelCase__=6 , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> List[str]:
'''simple docstring'''
__lowercase = kwargs.pop('''text_config_dict''' , lowerCAmelCase__ )
__lowercase = kwargs.pop('''vision_config_dict''' , lowerCAmelCase__ )
super().__init__(**lowerCAmelCase__ )
__lowercase = share_cross_modal_transformer_layers
__lowercase = hidden_act
__lowercase = hidden_size
__lowercase = initializer_factor
__lowercase = layer_norm_eps
__lowercase = share_link_tower_layers
__lowercase = link_tower_type
__lowercase = num_attention_heads
__lowercase = num_hidden_layers
__lowercase = tie_word_embeddings
__lowercase = init_layernorm_from_vision_encoder
if text_config is None:
__lowercase = {}
logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
if vision_config is None:
__lowercase = {}
logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
__lowercase = BridgeTowerTextConfig(**lowerCAmelCase__ )
__lowercase = BridgeTowerVisionConfig(**lowerCAmelCase__ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.text_config.to_dict()
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
        return output
| 522 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__a : str = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
if args.student_type == "roberta":
__lowercase = False
elif args.student_type == "gpt2":
__lowercase = False
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
if args.student_type == "roberta":
__lowercase = False
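# Added note: upstream these two helpers freeze the student's positional (and,
# for RoBERTa, token-type) embedding weights by setting ``requires_grad`` to
# False on them, so those embeddings stay fixed during distillation.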
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=lowercase , required=lowercase , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=lowercase , required=lowercase , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=lowercase , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=lowercase , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=lowercase , required=lowercase , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=lowercase , type=lowercase , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=lowercase , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=lowercase , required=lowercase , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=lowercase , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=lowercase , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=lowercase , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=lowercase , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=lowercase , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=lowercase , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=lowercase , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=lowercase , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=lowercase , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=lowercase , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=lowercase , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=lowercase , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=lowercase , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=lowercase , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=lowercase , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=lowercase , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=lowercase , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=lowercase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=lowercase , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=lowercase , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=lowercase , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=lowercase , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=lowercase , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=lowercase , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=lowercase , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=lowercase , default=4000 , help='''Checkpoint interval.''' )
__lowercase = parser.parse_args()
sanity_checks(lowercase )
# ARGS #
init_gpu_params(lowercase )
set_seed(lowercase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(F"Param: {args}" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(lowercase ) , lowercase , indent=4 )
git_log(args.dump_path )
__lowercase , __lowercase , __lowercase = MODEL_CLASSES[args.student_type]
__lowercase , __lowercase , __lowercase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__lowercase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__lowercase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__lowercase = tokenizer.all_special_tokens.index(lowercase )
__lowercase = tokenizer.all_special_ids[idx]
logger.info(F"Special tokens {special_tok_ids}" )
__lowercase = special_tok_ids
__lowercase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"Loading data from {args.data_file}" )
with open(args.data_file , '''rb''' ) as fp:
__lowercase = pickle.load(lowercase )
if args.mlm:
logger.info(F"Loading token counts from {args.token_counts} (already pre-computed)" )
with open(args.token_counts , '''rb''' ) as fp:
__lowercase = pickle.load(lowercase )
__lowercase = np.maximum(lowercase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__lowercase = 0.0 # do not predict special tokens
__lowercase = torch.from_numpy(lowercase )
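        # Illustration (added comment): with mlm_smoothing = 0.7, a token counted
        # 10_000 times gets weight 10_000 ** -0.7 ≈ 1.6e-3 while one counted 10
        # times gets ≈ 0.2, so rarer tokens are selected for masking far more often.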
else:
__lowercase = None
__lowercase = LmSeqsDataset(params=lowercase , data=lowercase )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"Loading student config from {args.student_config}" )
__lowercase = student_config_class.from_pretrained(args.student_config )
__lowercase = True
if args.student_pretrained_weights is not None:
logger.info(F"Loading pretrained weights from {args.student_pretrained_weights}" )
__lowercase = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowercase )
else:
__lowercase = student_model_class(lowercase )
if args.n_gpu > 0:
student.to(F"cuda:{args.local_rank}" )
logger.info('''Student loaded.''' )
# TEACHER #
__lowercase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowercase )
if args.n_gpu > 0:
teacher.to(F"cuda:{args.local_rank}" )
logger.info(F"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowercase , lowercase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowercase , lowercase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__lowercase = Distiller(
params=lowercase , dataset=lowercase , token_probs=lowercase , student=lowercase , teacher=lowercase )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
    main()
| 522 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
_lowercase = CLIPTokenizer
_lowercase = CLIPTokenizerFast
_lowercase = True
_lowercase = {}
_lowercase = False
def _UpperCamelCase( self : List[Any] ):
super().setUp()
# fmt: off
a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
a__ : Optional[Any] = {"unk_token": "<unk>"}
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase__ ) )
def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ):
a__ : int = "lower newer"
a__ : Optional[int] = "lower newer"
return input_text, output_text
def _UpperCamelCase( self : List[str] ):
a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a__ : int = "lower newer"
a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : int = tokens + [tokenizer.unk_token]
a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
@require_ftfy
def _UpperCamelCase( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y"
a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on unicode of space type
a__ : str = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on unicode of line break type
a__ : Union[str, Any] = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}'''
a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
a__ : Optional[Any] = f''' {text}'''
a__ : str = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
def _UpperCamelCase( self : int ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCamelCase__ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def _UpperCamelCase( self : int ):
super().test_tokenization_python_rust_equals()
def _UpperCamelCase( self : str ):
# CLIP always lower cases letters
pass
| 37 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def lowerCamelCase__ ( A_ ):
def decorator(A_ ):
UpperCAmelCase_ = getattr(A_ , "handle_key" , [] )
handle += [key]
setattr(A_ , "handle_key" , A_ )
return func
return decorator
def lowerCamelCase__ ( *A_ ):
def decorator(A_ ):
UpperCAmelCase_ = getattr(A_ , "handle_key" , [] )
handle += keys
setattr(A_ , "handle_key" , A_ )
return func
return decorator
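# Added note: both decorators tag the wrapped method with the key(s) it handles
# through a ``handle_key`` attribute; the metaclass below walks the class dict
# and builds its key -> handler table from those attributes.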
class lowercase_ ( type ):
def __new__( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = super().__new__(cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not hasattr(UpperCamelCase__ , "key_handler" ):
setattr(UpperCamelCase__ , "key_handler" , {} )
setattr(UpperCamelCase__ , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
UpperCAmelCase_ = getattr(UpperCamelCase__ , "handle_key" , [] )
for key in handled_keys:
UpperCAmelCase_ = value
return new_cls
@staticmethod
def lowerCamelCase_ ( cls ) -> str:
"""simple docstring"""
UpperCAmelCase_ = get_character()
if char != KEYMAP["undefined"]:
UpperCAmelCase_ = ord(UpperCamelCase__ )
UpperCAmelCase_ = cls.key_handler.get(UpperCamelCase__ )
if handler:
UpperCAmelCase_ = char
return handler(cls )
else:
return None
def lowerCamelCase__ ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 660 | 0 |
import math
def _lowerCamelCase ( initial_intensity : float , angle : float ) -> float:
'''simple docstring'''
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
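    # Added sketch: by Malus's law a polarizer at 60 degrees transmits
    # cos(60°) ** 2 = 0.25 of the light, so an input of 100 comes out as ~25.0.
    print(_lowerCamelCase(100, 60))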
| 717 |
import argparse
import os
import re
SCREAMING_SNAKE_CASE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
SCREAMING_SNAKE_CASE = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
SCREAMING_SNAKE_CASE = re.compile(R'\s*\(\s*"(\S[^"]+)"')
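# Illustration (added comment, hypothetical lines): _re_intro_mapping matches an
# opener like ``MODEL_MAPPING_NAMES = OrderedDict(`` and _re_identifier pulls
# "albert" out of an entry such as ``("albert", "AlbertConfig"),``; that captured
# string is the sort key used below.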
def _lowerCamelCase ( __A : Optional[int] , __A : bool = False ) -> int:
with open(__A , '''r''' , encoding='''utf-8''' ) as f:
_UpperCAmelCase : Union[str, Any] = f.read()
_UpperCAmelCase : Any = content.split('''\n''' )
_UpperCAmelCase : Any = []
_UpperCAmelCase : Tuple = 0
while line_idx < len(__A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_UpperCAmelCase : Union[str, Any] = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
_UpperCAmelCase : str = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_UpperCAmelCase : List[str] = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_UpperCAmelCase : Tuple = sorted(__A , key=lambda __A : _re_identifier.search(__A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(__A ) )
elif "\n".join(__A ) != content:
return True
def _lowerCamelCase ( __A : bool = False ) -> List[str]:
_UpperCAmelCase : List[str] = [os.path.join(__A , __A ) for f in os.listdir(__A ) if f.endswith('''.py''' )]
_UpperCAmelCase : List[Any] = [sort_auto_mapping(__A , overwrite=__A ) for fname in fnames]
if not overwrite and any(__A ):
_UpperCAmelCase : Optional[int] = [f for f, d in zip(__A , __A ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {', '.join(__A )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
SCREAMING_SNAKE_CASE = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 186 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase_ ( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ):
UpperCAmelCase = StableUnCLIPPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = 32
_UpperCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
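    # Added note: StableUnCLIP chains a text-conditioned prior that predicts a CLIP
    # image embedding with a UNet decoder conditioned on a noised version of that
    # embedding; the image normalizer and noising scheduler built above implement
    # the embedding-noise augmentation.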
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _UpperCamelCase = pipe('''anime turtle''' , generator=_A , output_type='''np''' )
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 10 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Tuple = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 214 | 0 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def UpperCAmelCase ( snake_case : Any , snake_case : Tuple , snake_case : Tuple , snake_case : List[Any]=None , snake_case : Dict=None , snake_case : Optional[int]=None , snake_case : str=None , snake_case : Optional[int]=None , ):
if attention_mask is None:
_lowerCAmelCase:List[str] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowerCAmelCase:Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowerCAmelCase:Tuple = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case )
if decoder_head_mask is None:
_lowerCAmelCase:Dict = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case )
if cross_attn_head_mask is None:
_lowerCAmelCase:List[Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
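# Added note: when no masks are passed, the helper above defaults to attending
# everywhere except pad tokens and to all-ones head / cross-attention masks
# (no heads pruned); tests that need sparsity pass explicit masks instead.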
class a__ :
def __init__( self : Dict ,a__ : Optional[int] ,a__ : Union[str, Any]=13 ,a__ : Optional[Any]=7 ,a__ : Tuple=True ,a__ : Tuple=False ,a__ : Optional[int]=99 ,a__ : List[str]=16 ,a__ : Optional[int]=2 ,a__ : Any=4 ,a__ : Union[str, Any]=4 ,a__ : int="relu" ,a__ : Union[str, Any]=0.1 ,a__ : Optional[Any]=0.1 ,a__ : Tuple=0.0 ,a__ : Optional[int]=0.0 ,a__ : List[str]=20 ,a__ : Optional[Any]=2 ,a__ : Optional[int]=1 ,a__ : Tuple=0 ,) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = parent
_lowerCAmelCase:str = batch_size
_lowerCAmelCase:Union[str, Any] = seq_length
_lowerCAmelCase:Optional[Any] = is_training
_lowerCAmelCase:int = use_labels
_lowerCAmelCase:Union[str, Any] = vocab_size
_lowerCAmelCase:Optional[Any] = hidden_size
_lowerCAmelCase:Tuple = num_hidden_layers
_lowerCAmelCase:Optional[Any] = num_attention_heads
_lowerCAmelCase:str = intermediate_size
_lowerCAmelCase:str = hidden_act
_lowerCAmelCase:Optional[Any] = hidden_dropout_prob
_lowerCAmelCase:Any = attention_probs_dropout_prob
_lowerCAmelCase:List[str] = encoder_layerdrop
_lowerCAmelCase:Any = decoder_layerdrop
_lowerCAmelCase:Union[str, Any] = max_position_embeddings
_lowerCAmelCase:Dict = eos_token_id
_lowerCAmelCase:Optional[int] = pad_token_id
_lowerCAmelCase:Optional[int] = bos_token_id
def __UpperCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
_lowerCAmelCase:str = self.eos_token_id # Eos Token
_lowerCAmelCase:str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowerCAmelCase:Any = input_ids.clamp(self.pad_token_id + 1)
_lowerCAmelCase:Any = decoder_input_ids.clamp(self.pad_token_id + 1)
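        # Illustration (added comment): with pad_token_id = 1, clamp(2) lifts any
        # sampled 0 or 1 up to 2, e.g. [0, 1, 7, 42] -> [2, 2, 7, 42], so no pad
        # ids can appear inside the random sequences used by these tests.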
_lowerCAmelCase:List[Any] = self.get_config()
_lowerCAmelCase:str = prepare_mam_aaa_inputs_dict(a__ ,a__ ,a__)
return config, inputs_dict
def __UpperCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,encoder_layerdrop=self.encoder_layerdrop ,decoder_layerdrop=self.decoder_layerdrop ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,)
def __UpperCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:Any = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCamelCase ( self : Optional[Any] ,a__ : List[str] ,a__ : Tuple) -> Any:
"""simple docstring"""
_lowerCAmelCase:Dict = MaMaaaModel(config=a__).get_decoder().to(a__).eval()
_lowerCAmelCase:Optional[int] = inputs_dict['''input_ids''']
_lowerCAmelCase:Any = inputs_dict['''attention_mask''']
_lowerCAmelCase:Any = inputs_dict['''head_mask''']
# first forward pass
_lowerCAmelCase:int = model(a__ ,attention_mask=a__ ,head_mask=a__ ,use_cache=a__)
_lowerCAmelCase , _lowerCAmelCase:int = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_lowerCAmelCase:Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size)
_lowerCAmelCase:Union[str, Any] = ids_tensor((self.batch_size, 3) ,2)
# append to next input_ids and
_lowerCAmelCase:Tuple = torch.cat([input_ids, next_tokens] ,dim=-1)
_lowerCAmelCase:str = torch.cat([attention_mask, next_attn_mask] ,dim=-1)
_lowerCAmelCase:List[str] = model(a__ ,attention_mask=a__)['''last_hidden_state''']
_lowerCAmelCase:List[str] = model(a__ ,attention_mask=a__ ,past_key_values=a__)[
'''last_hidden_state'''
]
# select random slice
_lowerCAmelCase:Dict = ids_tensor((1,) ,output_from_past.shape[-1]).item()
_lowerCAmelCase:Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase:int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a__ ,a__ ,atol=1E-2))
def __UpperCamelCase ( self : Dict ,a__ : Optional[Any] ,a__ : List[str]) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = MaMaaaModel(config=a__).to(a__).eval()
_lowerCAmelCase:Any = model(**a__)
_lowerCAmelCase:Tuple = outputs.encoder_last_hidden_state
_lowerCAmelCase:List[str] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase:Any = model.get_encoder()
encoder.save_pretrained(a__)
_lowerCAmelCase:Optional[Any] = MaMaaaEncoder.from_pretrained(a__).to(a__)
_lowerCAmelCase:Optional[int] = encoder(inputs_dict['''input_ids'''] ,attention_mask=inputs_dict['''attention_mask'''])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3)
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase:Optional[Any] = model.get_decoder()
decoder.save_pretrained(a__)
_lowerCAmelCase:List[str] = MaMaaaDecoder.from_pretrained(a__).to(a__)
_lowerCAmelCase:Optional[int] = decoder(
input_ids=inputs_dict['''decoder_input_ids'''] ,attention_mask=inputs_dict['''decoder_attention_mask'''] ,encoder_hidden_states=a__ ,encoder_attention_mask=inputs_dict['''attention_mask'''] ,)[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3)
@require_torch
class a__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
snake_case__ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
snake_case__ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
snake_case__ = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
snake_case__ = True
snake_case__ = True
snake_case__ = False
snake_case__ = False
def __UpperCamelCase ( self : Tuple ,a__ : Dict ,a__ : List[Any] ,a__ : Optional[Any] ,a__ : Tuple ,a__ : List[Any]) -> Optional[Any]:
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def __UpperCamelCase ( self : Tuple) -> Dict:
"""simple docstring"""
_lowerCAmelCase:str = MaMaaaModelTester(self)
_lowerCAmelCase:Optional[Any] = ConfigTester(self ,config_class=a__)
def __UpperCamelCase ( self : Dict) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : str) -> Dict:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowerCAmelCase:List[Any] = model_class(a__)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__)
_lowerCAmelCase , _lowerCAmelCase:int = model_class.from_pretrained(a__ ,output_loading_info=a__)
self.assertEqual(info['''missing_keys'''] ,[])
def __UpperCamelCase ( self : int) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*a__)
def __UpperCamelCase ( self : Any) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*a__)
def __UpperCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
_lowerCAmelCase:Optional[int] = model_class(a__)
model.to(a__)
model.eval()
_lowerCAmelCase:Optional[int] = copy.deepcopy(self._prepare_for_class(a__ ,a__))
if not self.is_encoder_decoder:
_lowerCAmelCase:List[Any] = inputs['''input_ids''']
del inputs["input_ids"]
else:
_lowerCAmelCase:int = inputs['''input_ids''']
_lowerCAmelCase:Union[str, Any] = inputs.get('''decoder_input_ids''' ,a__)
del inputs["input_ids"]
inputs.pop('''decoder_input_ids''' ,a__)
_lowerCAmelCase:Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
_lowerCAmelCase:Tuple = wte(a__)
else:
_lowerCAmelCase:int = wte(a__)
_lowerCAmelCase:Tuple = wte(a__)
with torch.no_grad():
model(**a__)[0]
def __UpperCamelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:Any = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase:Dict = input_dict['''input_ids''']
_lowerCAmelCase:Tuple = input_ids.ne(1).to(a__)
_lowerCAmelCase:Dict = MaMaaaForConditionalGeneration(a__).eval().to(a__)
if torch_device == "cuda":
model.half()
model.generate(a__ ,attention_mask=a__)
model.generate(num_beams=4 ,do_sample=a__ ,early_stopping=a__ ,num_return_sequences=3)
def UpperCAmelCase ( snake_case : Tuple ):
return torch.tensor(snake_case , dtype=torch.long , device=snake_case )
UpperCamelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class a__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''')
def __UpperCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase:List[Any] = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''').to(a__)
_lowerCAmelCase:Union[str, Any] = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]])
_lowerCAmelCase:List[Any] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]])
_lowerCAmelCase:Optional[Any] = prepare_mam_aaa_inputs_dict(model.config ,a__ ,a__)
with torch.no_grad():
_lowerCAmelCase:Optional[Any] = model(**a__)[0]
_lowerCAmelCase:Union[str, Any] = torch.Size((1, 11, 1024))
self.assertEqual(output.shape ,a__)
# change to expected output here
_lowerCAmelCase:List[Any] = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] ,device=a__)
self.assertTrue(torch.allclose(output[:, :3, :3] ,a__ ,atol=a__))
def __UpperCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''').to(a__)
# change to intended input
_lowerCAmelCase:Union[str, Any] = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]])
_lowerCAmelCase:List[str] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]])
_lowerCAmelCase:List[str] = prepare_mam_aaa_inputs_dict(model.config ,a__ ,a__)
with torch.no_grad():
_lowerCAmelCase:int = model(**a__)[0]
_lowerCAmelCase:str = torch.Size((1, 11, model.config.vocab_size))
self.assertEqual(output.shape ,a__)
# change to expected output here
_lowerCAmelCase:Optional[int] = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] ,device=a__)
self.assertTrue(torch.allclose(output[:, :3, :3] ,a__ ,atol=a__))
def __UpperCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''').to(a__)
_lowerCAmelCase:Union[str, Any] = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' ,src_lang='''fr''' ,tgt_lang='''en''')
_lowerCAmelCase:Optional[Any] = [
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
_lowerCAmelCase:List[Any] = tokenizer(a__ ,padding=a__ ,return_tensors='''pt''')
_lowerCAmelCase:Any = model.generate(
input_ids=dct['''input_ids'''].to(a__) ,attention_mask=dct['''attention_mask'''].to(a__) ,num_beams=5 ,forced_bos_token_id=tokenizer.get_lang_id('''en''') ,)
_lowerCAmelCase:Union[str, Any] = [
'''The NSA case highlights the total absence of intelligence debate''',
'''I think there are two levels of response from the French government.''',
'''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
''' communications in France.''',
]
_lowerCAmelCase:Union[str, Any] = tokenizer.batch_decode(
hypotheses_batch.tolist() ,clean_up_tokenization_spaces=a__ ,skip_special_tokens=a__)
assert generated == expected_en
| 439 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
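
# Minimal usage sketch for the model exercised above (illustrative; assumes the
# distilbert-base-uncased weights can be downloaded from the Hub).
if __name__ == "__main__":
    from transformers import DistilBertTokenizerFast

    demo_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    demo_model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
    demo_inputs = demo_tokenizer("Hello, world!", return_tensors="np")
    demo_hidden = demo_model(**demo_inputs).last_hidden_state
    print(demo_hidden.shape)  # (1, sequence_length, 768)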
| 439 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
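
# Quick round-trip sketch for the configuration classes above (illustrative only;
# uses only the constructors and methods defined in this file).
if __name__ == "__main__":
    text_cfg = BridgeTowerTextConfig()
    vision_cfg = BridgeTowerVisionConfig()
    combined = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
    # to_dict() nests the two sub-configs under "text_config" / "vision_config"
    print(sorted(combined.to_dict().keys())[:5])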
| 187 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the built-in voltage of a p-n junction diode."""
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
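
# Worked example (values are typical textbook numbers for silicon, used here only
# for illustration): with N_D = N_A = 1e17 cm^-3 and n_i = 1.5e10 cm^-3 at T = 300 K,
# V_bi = (kT/q) * ln(N_D * N_A / n_i^2) evaluates to roughly 0.81 V:
#   builtin_voltage(1e17, 1e17, 1.5e10)  # ~0.812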
| 491 | 0 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
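
# Example invocation (illustrative; the script name and checkpoint names are
# placeholders you would substitute with your own):
#   python create_model.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2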
| 675 |
def hamming(n_element: int) -> list:
    """Return the first n_element terms of the Hamming number series 2^i * 3^j * 5^k."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
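
# Illustrative check (derived from the definition above, not part of the original
# file): the first ten Hamming numbers are
#   hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]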
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 675 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
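
# Toy round-trip sketch of how a generated key pair is used (illustrative only;
# real key sizes come from generate_key above, not from these tiny primes):
#   p, q, e = 61, 53, 17               # n = 3233, phi = 3120, gcd(e, phi) == 1
#   d = pow(e, -1, 3120)               # 2753, the modular inverse of e
#   cipher = pow(42, e, 3233)          # encrypt with the public key (n, e)
#   assert pow(cipher, d, 3233) == 42  # decrypt with the private key (n, d)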
if __name__ == "__main__":
main()
| 464 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create emtpy sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 464 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
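
# Minimal usage sketch for the helpers under test (illustrative; assumes the local
# minhash_deduplication module with the signatures exercised above, where 0.85 is
# the Jaccard-similarity threshold for treating two files as duplicates):
if __name__ == "__main__":
    ds = get_dataset()
    ds_dedup, clusters = deduplicate_dataset(ds)
    print(len(ds), "->", len(ds_dedup), "rows after MinHash deduplication")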
| 315 |
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set DataStructure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: sort edges by weight, then greedily take any edge
        # that joins two different components.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
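
# Small illustrative run of the Kruskal implementation above: a 4-node graph whose
# MST keeps the three cheapest edges, total weight 1 + 2 + 3 = 6.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(3, 4, 3)
    g.add_edge(1, 4, 10)  # too heavy, excluded from the MST
    mst = g.kruskal()
    # each undirected edge is stored in both directions, hence the division by 2
    print(sum(w for u in mst.connections for _, w in mst.connections[u].items()) // 2)  # 6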
| 315 | 1 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
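
# Illustrative check (not part of the original script): for a 3-vertex graph where
# 0 -> 1 costs 5 and 1 -> 2 costs 3, the k-loop relaxes 0 -> 2 down to 8:
#   g = [[0.0, 5, float("inf")], [float("inf"), 0.0, 3], [float("inf"), float("inf"), 0.0]]
#   floyd_warshall(g, 3)  # dist[0][2] becomes 8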
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 666 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute the first `precision` digits of pi via the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(F"The first {n} digits of pi is: {pi(n)}")
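
# The series implemented above is Chudnovsky's formula,
#   1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134 k)
#          / ((3k)! (k!)^3 640320^(3k + 3/2)),
# which yields roughly 14 new digits per term (hence ceil(precision / 14)
# iterations). Sanity check (illustrative): pi(10) starts with "3.14159265".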
| 666 | 1 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
POKER_HANDS = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", POKER_HANDS)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
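
# Minimal usage sketch for the PokerHand class exercised above (illustrative;
# assumes the local solution module providing PokerHand with compare_with()):
if __name__ == "__main__":
    better = PokerHand("TS KS QS JS AS")  # royal flush
    worse = PokerHand("2H 3H 4H 5H 7H")  # plain flush
    print(better.compare_with(worse))  # "Win"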
| 53 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["""eval_loss"""] )
__lowercase = {"""perplexity""": perplexity}
__lowercase = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCamelCase )
return results
def snake_case ( lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
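# ------------------------------------------------------------------
# Illustrative invocation (a sketch, not part of the script). The flag
# names follow the argument dataclasses this script parses (not shown
# here); the file paths are placeholders you must supply yourself:
#
#   python run_language_modeling.py \
#       --model_name_or_path roberta-base \
#       --train_data_file path/to/train.txt \
#       --eval_data_file path/to/valid.txt \
#       --mlm \
#       --do_train --do_eval \
#       --output_dir /tmp/test-mlm
# ------------------------------------------------------------------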
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset


BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list) -> None:
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]

            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad_token_id = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad_token_id).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
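# ------------------------------------------------------------------
# Aside: a minimal, self-contained sketch of what `shift_tokens_right`
# does for mBART-style labels, as exercised by the language-code asserts
# above. This is an illustrative re-implementation for intuition, not
# the transformers function itself, which may differ in edge cases.
def _shift_tokens_right_sketch(labels, pad_token_id):
    import torch  # local import; the tests above only import DataLoader

    prev_output_tokens = labels.clone()
    # index of the last non-pad token (the language code in mBART labels)
    index_of_eos = (labels.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = labels.gather(1, index_of_eos).squeeze(1)
    # shift everything one step right and wrap the language code to front
    prev_output_tokens[:, 1:] = labels[:, :-1]
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens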
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
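# ------------------------------------------------------------------
# Usage sketch (assumes network access to the Hugging Face Hub; the
# checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above):
#
#   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tok("Hello, how are you?").input_ids
#   tok.decode(ids)
# ------------------------------------------------------------------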
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
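# Optional variation (a sketch): fix the RNG for reproducible samples.
# `generator` is a standard diffusers pipeline argument; the seed is arbitrary.
#
#   generator = torch.Generator("cuda").manual_seed(0)
#   image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]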
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
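# ------------------------------------------------------------------
# Usage sketch outside the test harness (16 kHz mono audio assumed, the
# extractor's default sampling rate; the waveform below is random noise
# just to show the shapes):
#
#   import numpy as np
#   from transformers import SpeechT5FeatureExtractor
#
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.random.randn(16000).astype(np.float32)  # 1 second
#   inputs = extractor(waveform, sampling_rate=16000, return_tensors="pt")
#   inputs.input_values.shape  # (1, 16000) for waveform inputs
# ------------------------------------------------------------------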
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
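# ------------------------------------------------------------------
# Usage sketch (the checkpoint name is an assumption; any checkpoint
# supported by the depth-estimation model mapping should work):
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   out["depth"].save("depth.png")   # PIL image, rescaled to 0-255
#   out["predicted_depth"].shape     # raw model output tensor
# ------------------------------------------------------------------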
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
    import doctest

    doctest.testmod()
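# Quick sanity check (values chosen arbitrarily):
#
#   resistor_series([5, 10, 15])   -> 30
#   resistor_parallel([10, 10])    -> 5.0  (two equal resistors in parallel halve)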
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
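# ------------------------------------------------------------------
# The pattern the dynamic test above exercises, as a standalone sketch
# (class and repo names below are placeholders, not real artifacts):
#
#   class MyImageProcessor(ViTImageProcessor):
#       ...
#
#   MyImageProcessor.register_for_auto_class()
#   MyImageProcessor().push_to_hub("my-dynamic-image-processor")
#   # consumers must opt in to executing remote code:
#   AutoImageProcessor.from_pretrained("user/my-dynamic-image-processor", trust_remote_code=True)
# ------------------------------------------------------------------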
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
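# Example invocation (a sketch; the checkpoint and config paths are
# placeholders you must supply):
#
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path efficientformer_l1.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub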
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random pivot from the list.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the k-th smallest element of lst (1-indexed).
    Note: elements equal to the pivot are discarded by the partition
    below, so the elements of lst are assumed to be distinct.
    """
    # pick a pivot and partition based on it
    # linear time
    pivot = random_pivot(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
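# Example (elements must be distinct; k is 1-based):
#
#   kth_number([2, 1, 3, 4, 5], 3) -> 3   # the median
#   kth_number([7, 4], 1)          -> 4   # the minimum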
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
    main()
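# Example invocations of the resulting CLI (subcommand names follow the
# parsers registered in get_config_parser above):
#
#   accelerate config            # interactive configuration questionnaire
#   accelerate config default    # write a default config file
#   accelerate config update     # migrate an existing config file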
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
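
    # How the toy BPE above behaves (traced by hand from the merge ranks):
    # "lower" -> l o w e r</w> -> ("l o") lo w e r</w> -> ("e r</w>") lo w er</w>
    # so tokenize("lower") == ["lo", "w", "er</w>"], matching test_full_tokenizer below.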
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
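
                # Concretely, with text_of_1_token == "hello" (len 5):
                #   "hello hello"  -> offsets (0, 5) and (6, 11)
                #   " hello hello" -> offsets (1, 6) and (7, 12)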
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 401 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
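
    # Note on the property above: the stub stands in for a CLIP feature extractor
    # purely so the pipeline's feature-extractor plumbing has something callable;
    # it returns an object with an empty `pixel_values` tensor and a chainable `.to()`.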
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
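
    # The 3x3 corner slice plus fixed-seed pattern above is a standard diffusers
    # regression check: with full determinism enabled, the tiny UNet should
    # reproduce the hard-coded `expected_slice` to within 5e-3.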
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 401 | 1 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
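

# Minimal usage sketch (hypothetical module `mod` that did `import os` at import
# time; `fake_join` is an illustrative stand-in for e.g. a streaming-aware join):
#
#   def fake_join(*paths):
#       return "/".join(paths)
#
#   with patch_submodule(mod, "os.path.join", fake_join):
#       assert mod.os.path.join("a", "b") == "a/b"   # patched at every level
#   # on __exit__, mod's original "os" global is restored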
| 134 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
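
# For orientation (derived from the AST model code, not from this config class):
# with the defaults above, the spectrogram patch grid is
#   frequency: (num_mel_bins - patch_size) // frequency_stride + 1 = (128 - 16) // 10 + 1 = 12
#   time:      (max_length - patch_size) // time_stride + 1 = (1024 - 16) // 10 + 1 = 101
# i.e. 12 * 101 = 1212 patches per spectrogram.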
| 134 | 1 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 270 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
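    # Lazy-import note (describing the standard transformers pattern below): the
    # module is replaced in sys.modules with a _LazyModule proxy, so importing this
    # package stays cheap and torch/TF are only pulled in when an attribute such as
    # `DebertaModel` is actually accessed.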
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 1 |