| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81 to 54k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 or 1) |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase ):
if len(_lowerCamelCase ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
lowerCamelCase__ : Tuple = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
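# Quick usage sketch: a 3-4-5 triangle closes, but sides (1, 2, 3) do not,
# since the longest side must be strictly shorter than the sum of the others.
# >>> check_polygon([3, 4, 5])
# True
# >>> check_polygon([1, 2, 3])
# False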
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
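# Minimal usage sketch (the checkpoint name is an assumption; any unconditional
# audio diffusion checkpoint with this unet/scheduler layout should work):
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
# waveform = output.audios[0]  # numpy array of shape (channels, samples)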
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
A_ : Union[str, Any] = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
A_ : Dict = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : int = (images / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase__ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCamelCase__ : List[str] = numpy_to_pil(snake_case_ )
return images
def lowerCamelCase_ ( _lowerCamelCase ):
if images.ndim == 3:
lowerCamelCase__ : List[str] = images[None, ...]
lowerCamelCase__ : int = (images * 255).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCamelCase__ : str = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
lowerCamelCase__ : Tuple = [Image.fromarray(snake_case_ ) for image in images]
return pil_images
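# Usage sketch: turn a batch of model-style output tensors into PIL images.
# import torch
# batch = torch.rand(2, 3, 64, 64) * 2 - 1  # values in [-1, 1], NCHW layout
# pil_images = pt_to_pil(batch)             # list of two 64x64 RGB PIL images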
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
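# Sketch of the API exercised above (argument order inferred from the calls in
# _test_export): export a checkpoint to ONNX, then shrink it via quantization.
# from pathlib import Path
# from transformers.convert_graph_to_onnx import convert, quantize
# convert("pt", "bert-base-cased", Path("onnx/model.onnx"), 12, None)
# quantized_path = quantize(Path("onnx/model.onnx"))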
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : Dict = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class a_ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'ibert'
def __init__(self, lowerCamelCase_=3_0_5_2_2, lowerCamelCase_=7_6_8, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=2, lowerCamelCase_="absolute", lowerCamelCase_=False, lowerCamelCase_="none", **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__, bos_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, **UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Tuple = intermediate_size
lowerCamelCase__ : int = hidden_dropout_prob
lowerCamelCase__ : int = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = type_vocab_size
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : Any = layer_norm_eps
lowerCamelCase__ : List[Any] = position_embedding_type
lowerCamelCase__ : List[str] = quant_mode
lowerCamelCase__ : Tuple = force_dequant
class a_ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase__ : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
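# Usage sketch: I-BERT is an integer-only quantized RoBERTa variant;
# `quant_mode=True` enables quantized inference and `force_dequant` can
# selectively turn quantization back off for sensitive ops.
# from transformers import IBertConfig, IBertModel
# config = IBertConfig(quant_mode=True)
# model = IBertModel(config)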
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Optional[int] = "tf"
else:
A_ : Tuple = "jax"
class a_ ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = ByTaTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : Tuple = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_lowerCamelCase )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : int = []
for i in range(len(_lowerCamelCase ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=_lowerCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Tuple = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), _lowerCamelCase ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_lowerCamelCase ), _lowerCamelCase ) )
if max_length is not None and len(_lowerCamelCase ) > max_length:
lowerCamelCase__ : str = toks[:max_length]
if min_length is not None and len(_lowerCamelCase ) < min_length and len(_lowerCamelCase ) > 0:
while len(_lowerCamelCase ) < min_length:
lowerCamelCase__ : List[Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : List[Any] = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase, clean_up_tokenization_spaces=_lowerCamelCase )
if " " not in output_txt and len(_lowerCamelCase ) > 1:
lowerCamelCase__ : Optional[int] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_lowerCamelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_lowerCamelCase )
)
if with_prefix_space:
lowerCamelCase__ : List[str] = ' ' + output_txt
lowerCamelCase__ : str = tokenizer.encode(_lowerCamelCase, add_special_tokens=_lowerCamelCase )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.ta_base_tokenizer
lowerCamelCase__ : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
lowerCamelCase__ : Optional[int] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'], batch_without_eos_added['input_ids'] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.ta_base_tokenizer
lowerCamelCase__ : Dict = 'Unicode €.'
lowerCamelCase__ : List[str] = tokenizer(_lowerCamelCase )
lowerCamelCase__ : List[str] = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1]
self.assertEqual(encoded['input_ids'], _lowerCamelCase )
# decoding
lowerCamelCase__ : List[Any] = tokenizer.decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase, 'Unicode €.</s>' )
lowerCamelCase__ : Optional[Any] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1]
self.assertEqual(encoded['input_ids'], _lowerCamelCase )
# decoding
lowerCamelCase__ : List[Any] = tokenizer.decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase, 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), 'e è é ê ë</s>' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.ta_base_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : str = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0]
# fmt: on
lowerCamelCase__ : Dict = tokenizer(_lowerCamelCase, padding=_lowerCamelCase, return_tensors=_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase, _lowerCamelCase )
if FRAMEWORK != "jax":
lowerCamelCase__ : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : Optional[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowerCamelCase, _lowerCamelCase )
self.assertEqual((2, 3_7), batch.input_ids.shape )
self.assertEqual((2, 3_7), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.ta_base_tokenizer
lowerCamelCase__ : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(_lowerCamelCase, padding=_lowerCamelCase, return_tensors=_lowerCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', _lowerCamelCase )
self.assertIn('attention_mask', _lowerCamelCase )
self.assertNotIn('decoder_input_ids', _lowerCamelCase )
self.assertNotIn('decoder_attention_mask', _lowerCamelCase )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.ta_base_tokenizer
lowerCamelCase__ : List[Any] = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : List[str] = tokenizer(
text_target=_lowerCamelCase, max_length=3_2, padding='max_length', truncation=_lowerCamelCase, return_tensors=_lowerCamelCase )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.ta_base_tokenizer
lowerCamelCase__ : List[str] = ['A long paragraph for summarization. </s>']
lowerCamelCase__ : int = ['Summary of the text. </s>']
# fmt: off
lowerCamelCase__ : Dict = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1]
lowerCamelCase__ : Optional[int] = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1]
# fmt: on
lowerCamelCase__ : Optional[int] = tokenizer(_lowerCamelCase, text_target=_lowerCamelCase )
self.assertEqual(_lowerCamelCase, batch['input_ids'][0] )
self.assertEqual(_lowerCamelCase, batch['labels'][0] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : List[str] = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : Optional[int] = tokenizer.encode(_lowerCamelCase, add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
lowerCamelCase__ : Tuple = tokenizer.__class__.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : int = after_tokenizer.encode(_lowerCamelCase, add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase, _lowerCamelCase )
shutil.rmtree(_lowerCamelCase )
lowerCamelCase__ : List[str] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Union[str, Any] = tempfile.mkdtemp()
lowerCamelCase__ : Dict = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : Dict = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : Optional[int] = tokenizer.encode(_lowerCamelCase, add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = tokenizer.__class__.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : int = after_tokenizer.encode(_lowerCamelCase, add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase, _lowerCamelCase )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : Optional[Any] = tokenizer.__class__.from_pretrained(_lowerCamelCase, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(_lowerCamelCase )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Union[str, Any] = json.load(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[int] = json.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_lowerCamelCase, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(_lowerCamelCase, _lowerCamelCase )
with open(os.path.join(_lowerCamelCase, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(_lowerCamelCase, _lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : int = tokenizer_class.from_pretrained(
_lowerCamelCase, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : List[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=_lowerCamelCase )]
lowerCamelCase__ : Optional[int] = tokenizer_class.from_pretrained(
_lowerCamelCase, additional_special_tokens=_lowerCamelCase, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(_lowerCamelCase )
self.assertTrue(tokenizer.decode([2_5_5] ) == '' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.get_tokenizers(fast=_lowerCamelCase, do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Optional[Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
lowerCamelCase__ : Optional[int] = tokenizer.convert_tokens_to_string(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase, _lowerCamelCase )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : List[str] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : List[Any] = tokenizer.convert_ids_to_tokens(
_lowerCamelCase, skip_special_tokens=_lowerCamelCase )
for attr in attributes_list:
setattr(_lowerCamelCase, attr + '_id', _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase, _lowerCamelCase ), _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase, attr + '_id' ), _lowerCamelCase )
setattr(_lowerCamelCase, attr + '_id', _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase, _lowerCamelCase ), _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase, attr + '_id' ), _lowerCamelCase )
setattr(_lowerCamelCase, 'additional_special_tokens_ids', [] )
self.assertListEqual(getattr(_lowerCamelCase, 'additional_special_tokens' ), [] )
self.assertListEqual(getattr(_lowerCamelCase, 'additional_special_tokens_ids' ), [] )
setattr(_lowerCamelCase, 'additional_special_tokens_ids', [token_id_to_test_setters] )
self.assertListEqual(getattr(_lowerCamelCase, 'additional_special_tokens' ), [token_to_test_setters] )
self.assertListEqual(getattr(_lowerCamelCase, 'additional_special_tokens_ids' ), [token_id_to_test_setters] )
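# Usage sketch: ByT5 operates on raw UTF-8 bytes, so each id is simply the byte
# value plus 3 (offsetting the pad=0 / eos=1 / unk=2 special ids):
# tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
# tokenizer("hi")["input_ids"]  # [107, 108, 1] -> ord("h") + 3, ord("i") + 3, </s>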
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
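# Sketch of how this table is typically consumed (exact import path is an
# assumption): look up a pin and check the installed version against it.
# from transformers.utils.versions import require_version
# require_version(deps["numpy"])  # raises if the installed numpy violates the pin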
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Dict = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class a_ ( _A ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'umt5'
lowerCamelCase__ : Union[str, Any] = ['past_key_values']
def __init__(self, lowerCamelCase_=2_5_0_1_1_2, lowerCamelCase_=5_1_2, lowerCamelCase_=6_4, lowerCamelCase_=1_0_2_4, lowerCamelCase_=8, lowerCamelCase_=None, lowerCamelCase_=6, lowerCamelCase_=3_2, lowerCamelCase_=1_2_8, lowerCamelCase_=0.1, lowerCamelCase_=1e-6, lowerCamelCase_=1.0, lowerCamelCase_="gated-gelu", lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_="T5Tokenizer", lowerCamelCase_=True, lowerCamelCase_=0, lowerCamelCase_=1, lowerCamelCase_=0, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=lowerCamelCase_, tokenizer_class=lowerCamelCase_, tie_word_embeddings=lowerCamelCase_, pad_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, decoder_start_token_id=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : Any = d_model
lowerCamelCase__ : Optional[Any] = d_kv
lowerCamelCase__ : Dict = d_ff
lowerCamelCase__ : Dict = num_layers
lowerCamelCase__ : Dict = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCamelCase__ : Union[str, Any] = num_heads
lowerCamelCase__ : Optional[Any] = relative_attention_num_buckets
lowerCamelCase__ : Any = relative_attention_max_distance
lowerCamelCase__ : List[Any] = dropout_rate
lowerCamelCase__ : Optional[int] = layer_norm_epsilon
lowerCamelCase__ : Optional[int] = initializer_factor
lowerCamelCase__ : Dict = feed_forward_proj
lowerCamelCase__ : List[str] = use_cache
lowerCamelCase__ : Any = self.feed_forward_proj.split('-' )
lowerCamelCase__ : Any = act_info[-1]
lowerCamelCase__ : Optional[int] = act_info[0] == "gated"
if len(lowerCamelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase_ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
lowerCamelCase__ : Any = "gelu_new"
@property
def a__ (self ):
'''simple docstring'''
return self.d_model
@property
def a__ (self ):
'''simple docstring'''
return self.num_heads
@property
def a__ (self ):
'''simple docstring'''
return self.num_layers
class a_ ( _A ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
lowerCamelCase__ : Tuple = "past_encoder_sequence + sequence"
lowerCamelCase__ : str = {0: "batch"}
lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCamelCase__ : List[str] = {0: "batch", 1: "decoder_sequence"}
lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_, direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ (self ):
'''simple docstring'''
return 1_3
@property
def a__ (self ):
'''simple docstring'''
return 5e-4
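# Usage sketch: the decoder depth mirrors num_layers unless set explicitly.
# from transformers import UMT5Config
# config = UMT5Config()
# assert config.num_decoder_layers == config.num_layers == 8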
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over the RFC 3526 MODP groups.

    The current minimum recommendation is a 2048-bit modulus (group 14).
    """

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check whether the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check whether the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
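# Usage sketch: both parties derive the same SHA-256 shared secret from each
# other's public keys.
# alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
# assert alice.generate_shared_key(bob.generate_public_key()) == \
#     bob.generate_shared_key(alice.generate_public_key())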
"""simple docstring"""
import random
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
lowerCamelCase__ : Dict = {i: [] for i in range(a__ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(a__ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(a__ ):
for j in range(i + 1 , a__ ):
if random.random() < probability:
graph[i].append(a__ )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(a__ )
return graph
def lowerCamelCase_ ( _lowerCamelCase ):
return {
i: [j for j in range(a__ ) if i != j] for i in range(a__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
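# Usage sketch: an undirected graph over 4 vertices where each possible edge
# appears with probability 0.5; seed first for a reproducible layout.
# random.seed(1)
# print(random_graph(4, 0.5))  # adjacency dict; symmetric because directed=False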
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
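# Worked example: in SI units, a 10 kg body moving at 10 m/s carries
# 0.5 * 10 * 10**2 = 500 J of kinetic energy.
# >>> kinetic_energy(10, 10)
# 500.0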
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Tuple = split_dict._to_yaml_list()
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = SplitDict._from_yaml_list(UpperCamelCase__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCamelCase__ : int = None
# the split name of split_dict takes over the name of the split info object
lowerCamelCase__ : Optional[int] = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=UpperCamelCase__ ), SplitInfo(dataset_name='my_dataset' )] )
def lowerCamelCase_ ( _lowerCamelCase ):
# For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name"
# field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files
lowerCamelCase__ : Optional[int] = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
            # the config is in a subfolder, so the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check ensures we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
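# The offline test above works because every `requests` call funnels through
# `requests.Session.request`, so patching that one method intercepts all
# traffic. A self-contained illustration of the pattern (names are my own):
import unittest.mock as mock
import requests

def fetch_status(url: str) -> int:
    return requests.get(url).status_code

fake_response = mock.Mock()
fake_response.status_code = 500
with mock.patch('requests.Session.request', return_value=fake_response) as mock_request:
    assert fetch_status('https://example.com') == 500  # no real network call happens
    mock_request.assert_called()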
| 696 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def lowerCamelCase_ ( ):
from torch.utils.cpp_extension import load
    lowerCamelCase__ : str = Path(__file__ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
lowerCamelCase__ : Tuple = [
root / filename
for filename in [
'vision.cpp',
os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
]
]
load(
'MultiScaleDeformableAttention' , _UpperCAmelCase , with_cuda=_UpperCAmelCase , extra_include_paths=[str(_UpperCAmelCase )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
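# The loader above JIT-compiles C++/CUDA sources on first use. A minimal
# CPU-only sketch of the same mechanism via `load_inline`; it requires a
# working C++ toolchain and ninja, and the extension name and function below
# are illustrative, not part of the original kernels.
import torch
from torch.utils.cpp_extension import load_inline

_cpp_source = """
torch::Tensor scaled_add(torch::Tensor a, torch::Tensor b, double alpha) {
    return a + alpha * b;
}
"""
_ext = load_inline(name='toy_extension', cpp_sources=_cpp_source, functions=['scaled_add'])
print(_ext.scaled_add(torch.ones(3), torch.ones(3), 0.5))  # tensor([1.5000, 1.5000, 1.5000])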
| 716 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
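# The core of the guidance loop above: take the gradient of a learned value
# estimate with respect to the noisy sample and nudge the sample along it.
# A stripped-down, runnable sketch (the quadratic value function is a toy):
import torch

def value_guided_step(x: torch.Tensor, value_fn, scale: float = 0.1) -> torch.Tensor:
    x = x.detach().requires_grad_()
    y = value_fn(x).sum()
    (grad,) = torch.autograd.grad(y, x)
    return (x + scale * grad).detach()

sample = torch.randn(4, 8)
for _ in range(100):
    sample = value_guided_step(sample, value_fn=lambda t: -(t ** 2).sum(dim=-1))
print(sample.abs().max())  # driven toward 0, the maximizer of the toy value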
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ : Union[str, Any] = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
A_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
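# What `_LazyModule` buys: submodule imports are deferred until an attribute
# is first accessed. A toy re-implementation of the idea (not the real class):
import importlib
import types

class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

lazy = _ToyLazyModule('lazy', {'sqrt': 'math', 'pi': 'math'})
print(lazy.sqrt(lazy.pi))  # math is imported only on this first access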
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
    # total number of single-character occurrences, used to normalise counts into probabilities.
lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
# one length string
lowerCamelCase__ : str = 0
    # for each letter of the alphabet, look up its count and accumulate its entropy contribution
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
lowerCamelCase__ : Union[str, Any] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowerCamelCase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
    # for each two-letter sequence, accumulate its entropy contribution.
    for cha in my_alphas:
        for chb in my_alphas:
            lowerCamelCase__ : int = cha + chb
if sequence in two_char_strings:
lowerCamelCase__ : int = two_char_strings[sequence]
lowerCamelCase__ : Tuple = int(_lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(_lowerCamelCase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # count the bigram formed by a leading space and the first character.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def main ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
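# A readable, self-contained version of the first-order entropy computed
# above: H = -sum(p * log2(p)) over single-character frequencies.
import math
from collections import Counter

def shannon_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

print(f"{shannon_entropy('abab'):.1f}")  # 1.0: two equally likely symbols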
| 696 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A_ : Any = logging.get_logger(__name__)
A_ : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
A_ : int = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
for attribute in key.split('.' ):
lowerCamelCase__ : Optional[int] = getattr(_lowercase , _lowercase )
if weight_type is not None:
lowerCamelCase__ : Optional[Any] = getattr(_lowercase , _lowercase ).shape
else:
lowerCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowerCamelCase__ : Any = value
elif weight_type == "weight_g":
lowerCamelCase__ : List[Any] = value
elif weight_type == "weight_v":
lowerCamelCase__ : Optional[int] = value
elif weight_type == "bias":
lowerCamelCase__ : List[Any] = value
else:
lowerCamelCase__ : Union[str, Any] = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : Optional[int] = fairseq_model.state_dict()
lowerCamelCase__ : Optional[int] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowerCamelCase__ : Tuple = None
for name, value in fairseq_dict.items():
lowerCamelCase__ : int = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == 'group' , )
lowerCamelCase__ : Optional[Any] = True
elif name.split('.' )[0] == "proj":
lowerCamelCase__ : str = fairseq_model.proj
lowerCamelCase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCamelCase__ : Optional[Any] = True
if "*" in mapped_key:
lowerCamelCase__ : List[Any] = name.split(_lowercase )[0].split('.' )[-2]
lowerCamelCase__ : Optional[int] = mapped_key.replace('*' , _lowercase )
if "weight_g" in name:
lowerCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
lowerCamelCase__ : Dict = 'weight_v'
elif "bias" in name:
lowerCamelCase__ : Dict = 'bias'
elif "weight" in name:
lowerCamelCase__ : str = 'weight'
else:
lowerCamelCase__ : List[Any] = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
return proj_weight
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = full_name.split('conv_layers.' )[-1]
lowerCamelCase__ : Tuple = name.split('.' )
lowerCamelCase__ : Optional[int] = int(items[0] )
lowerCamelCase__ : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowerCamelCase__ : List[str] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowerCamelCase__ : Optional[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowerCamelCase__ : Dict = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowerCamelCase__ : Union[str, Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowercase )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[Any] = emb.weight.shape
lowerCamelCase__ : Any = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
lowerCamelCase__ : Union[str, Any] = emb.weight.data
return lin_layer
def lowerCamelCase_ ( _lowerCamelCase ):
with open(_lowercase , 'r' , encoding='utf-8' ) as f:
lowerCamelCase__ : Optional[int] = f.readlines()
lowerCamelCase__ : str = [line.split(' ' )[0] for line in lines]
lowerCamelCase__ : Tuple = len(_lowercase )
lowerCamelCase__ : Dict = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(_lowercase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowerCamelCase__ : Optional[int] = WavaVecaConfig.from_pretrained(_lowercase )
lowerCamelCase__ : str = SpeechaTextaConfig.from_pretrained(
_lowercase , vocab_size=_lowercase , decoder_layers=_lowercase , do_stable_layer_norm=_lowercase )
lowerCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowerCamelCase__ : Any = model[0].eval()
# set weights for wav2vec2 encoder
lowerCamelCase__ : Optional[int] = WavaVecaModel(_lowercase )
lowerCamelCase__ : Optional[int] = recursively_load_weights_wavaveca(model.encoder , _lowercase )
lowerCamelCase__ : int = SpeechaTextaForCausalLM(_lowercase )
lowerCamelCase__ , lowerCamelCase__ : str = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowercase )
# set output linear layer
unexpected_keys.remove('embed_out' )
lowerCamelCase__ : Tuple = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowerCamelCase__ : Any = SpeechEncoderDecoderModel(encoder=_lowercase , decoder=_lowercase )
lowerCamelCase__ : str = False
# add projection layer
lowerCamelCase__ : str = nn.Parameter(projection_layer.weight )
lowerCamelCase__ : Dict = nn.Parameter(projection_layer.bias )
lowerCamelCase__ : Optional[Any] = create_vocab_dict(_lowercase )
with open(os.path.join(_lowercase , 'vocab.json' ) , 'w' ) as fp:
json.dump(_lowercase , _lowercase )
lowerCamelCase__ : Dict = SpeechaTextaTokenizer(os.path.join(_lowercase , 'vocab.json' ) )
tokenizer.save_pretrained(_lowercase )
lowerCamelCase__ : List[str] = hf_wavavec.config.to_dict()
lowerCamelCase__ : int = tokenizer.pad_token_id
lowerCamelCase__ : int = tokenizer.bos_token_id
lowerCamelCase__ : Dict = tokenizer.eos_token_id
lowerCamelCase__ : int = 'speech_to_text_2'
lowerCamelCase__ : List[Any] = 'wav2vec2'
lowerCamelCase__ : Dict = SpeechEncoderDecoderConfig.from_dict(_lowercase )
hf_wavavec.save_pretrained(_lowercase )
feature_extractor.save_pretrained(_lowercase )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_02_24, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
A_ : Optional[int] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
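# The conversion above is essentially a key-renaming pass over a checkpoint's
# state dict, driven by the MAPPING table. The mechanism in miniature, reusing
# one real mapping entry from above (the checkpoint contents are made up):
import torch

_RENAMES = {'w2v_model.layer_norm': 'feature_projection.layer_norm'}

def rename_keys(state_dict):
    out = {}
    for key, value in state_dict.items():
        for old, new in _RENAMES.items():
            if key.startswith(old):
                key = new + key[len(old):]
                break
        out[key] = value
    return out

_ckpt = {'w2v_model.layer_norm.weight': torch.zeros(4), 'other.bias': torch.zeros(4)}
print(sorted(rename_keys(_ckpt)))  # ['feature_projection.layer_norm.weight', 'other.bias']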
| 718 |
"""simple docstring"""
import os
def solution():
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
    names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
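# The scoring rule used above in isolation: each letter contributes its
# alphabetical value (A=1, ..., Z=26), via ord(letter) - 64 for uppercase input.
def name_score(name: str) -> int:
    return sum(ord(letter) - 64 for letter in name)

assert name_score('COLIN') == 53  # the worked example from Project Euler #22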
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class a_ :
'''simple docstring'''
    destination_vertex: int
    weight: int
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : list[list[Edge]] = [[] for _ in range(_lowercase )]
lowerCamelCase__ : Dict = size
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def a__ (self ):
'''simple docstring'''
return self._size
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(_lowercase, _lowercase ) )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = deque([start_vertex] )
lowerCamelCase__ : list[int | None] = [None] * self.size
lowerCamelCase__ : Tuple = 0
while queue:
lowerCamelCase__ : Union[str, Any] = queue.popleft()
lowerCamelCase__ : Any = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowerCamelCase__ : Union[str, Any] = current_distance + edge.weight
lowerCamelCase__ : Dict = distances[edge.destination_vertex]
if (
isinstance(_lowercase, _lowercase )
and new_distance >= dest_vertex_distance
):
continue
lowerCamelCase__ : Optional[Any] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
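# The deque trick above in a self-contained form: weight-0 edges go to the
# front, weight-1 edges to the back, so vertices leave the deque in
# nondecreasing distance order (the adjacency format here is illustrative).
from collections import deque

def zero_one_bfs(adjacency: dict, start: int, finish: int) -> int:
    distances = {start: 0}
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v, weight in adjacency.get(u, []):
            candidate = distances[u] + weight
            if candidate < distances.get(v, float('inf')):
                distances[v] = candidate
                if weight == 0:
                    queue.appendleft(v)
                else:
                    queue.append(v)
    if finish not in distances:
        raise ValueError('No path from start_vertex to finish_vertex.')
    return distances[finish]

assert zero_one_bfs({0: [(1, 0), (2, 1)], 1: [(2, 0)]}, 0, 2) == 0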
| 719 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
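# The processor's routing logic, reduced to its skeleton: audio goes to the
# feature extractor, text to the tokenizer, and the text ids are attached as
# labels when both are present. The toy callables below are stand-ins, not
# the real feature extractor or tokenizer.
def process(audio=None, text=None, *, feature_extractor, tokenizer):
    if audio is None and text is None:
        raise ValueError('You need to specify either an `audio` or `text` input to process.')
    inputs = feature_extractor(audio) if audio is not None else None
    encodings = tokenizer(text) if text is not None else None
    if inputs is None:
        return encodings
    if encodings is not None:
        inputs['labels'] = encodings['input_ids']
    return inputs

_fe = lambda a: {'input_features': [len(a)]}
_tok = lambda t: {'input_ids': [ord(c) for c in t]}
print(process(audio=[0.1, 0.2], text='hi', feature_extractor=_fe, tokenizer=_tok))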
| 696 | 0 |
"""simple docstring"""
def solution(n = 1000 ):
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"{solution() = }")
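# Brute-force cross-check for the algebraic solution above (O(n^2) instead of
# O(n), but independent of the derivation):
def solution_brute_force(n: int = 1000) -> int:
    for a in range(1, n // 3):
        for b in range(a + 1, (n - a) // 2 + 1):
            c = n - a - b
            if a * a + b * b == c * c:
                return a * b * c
    return -1

assert solution_brute_force(12) == 60  # the 3-4-5 triangle
print(solution_brute_force())  # 31875000, matching solution()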
| 720 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def _long_tensor ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 absolute difference, so it's not a good idea to measure closeness with an additive tolerance.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
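# The ratio-based comparison from the test above, as a reusable helper
# (the tensor values below are illustrative):
import torch

def close_by_ratio(expected: torch.Tensor, actual: torch.Tensor, tol: float = 1e-3) -> bool:
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))

_a = torch.tensor([1e8, 1e-2])
assert close_by_ratio(_a, _a * 1.0005)    # within 0.1% everywhere
assert not close_by_ratio(_a, _a * 1.01)  # 1% off fails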
| 696 | 0 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class a_ :
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
return self.get_dummy_input()
@property
def a__ (self ):
'''simple docstring'''
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def a__ (self, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=False, lowerCamelCase_=False, ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 4
lowerCamelCase__ : Tuple = 3_2
lowerCamelCase__ : int = (3_2, 3_2)
lowerCamelCase__ : Tuple = torch.manual_seed(0 )
lowerCamelCase__ : Tuple = torch.device(_a )
lowerCamelCase__ : int = (batch_size, num_channels) + sizes
lowerCamelCase__ : Optional[int] = randn_tensor(_a, generator=_a, device=_a )
lowerCamelCase__ : Dict = {'hidden_states': hidden_states}
if include_temb:
lowerCamelCase__ : Tuple = 1_2_8
lowerCamelCase__ : List[Any] = randn_tensor((batch_size, temb_channels), generator=_a, device=_a )
if include_res_hidden_states_tuple:
lowerCamelCase__ : Optional[int] = torch.manual_seed(1 )
lowerCamelCase__ : Union[str, Any] = (randn_tensor(_a, generator=_a, device=_a ),)
if include_encoder_hidden_states:
lowerCamelCase__ : List[str] = floats_tensor((batch_size, 3_2, 3_2) ).to(_a )
if include_skip_sample:
lowerCamelCase__ : List[Any] = randn_tensor(((batch_size, 3) + sizes), generator=_a, device=_a )
return dummy_input
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = {
'in_channels': 3_2,
'out_channels': 3_2,
'temb_channels': 1_2_8,
}
if self.block_type == "up":
lowerCamelCase__ : int = 3_2
if self.block_type == "mid":
init_dict.pop('out_channels' )
lowerCamelCase__ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : Dict = self.block_class(**_a )
unet_block.to(_a )
unet_block.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = unet_block(**_a )
if isinstance(_a, _a ):
lowerCamelCase__ : Dict = output[0]
self.assertEqual(output.shape, self.output_shape )
lowerCamelCase__ : List[str] = output[0, -1, -3:, -3:]
lowerCamelCase__ : str = torch.tensor(_a ).to(_a )
assert torch_all_close(output_slice.flatten(), _a, atol=5e-3 )
@unittest.skipIf(torch_device == 'mps', 'Training is not supported in mps' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : Optional[int] = self.block_class(**_a )
model.to(_a )
model.train()
lowerCamelCase__ : Any = model(**_a )
if isinstance(_a, _a ):
lowerCamelCase__ : str = output[0]
lowerCamelCase__ : Optional[int] = torch.device(_a )
lowerCamelCase__ : Optional[Any] = randn_tensor(output.shape, device=_a )
lowerCamelCase__ : Tuple = torch.nn.functional.mse_loss(_a, _a )
loss.backward()
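# The training check above distilled: forward pass, MSE against random
# targets, backward, and confirm gradients landed on every trainable weight.
import torch

def training_smoke_test(model: torch.nn.Module, example_input: torch.Tensor) -> None:
    model.train()
    output = model(example_input)
    loss = torch.nn.functional.mse_loss(output, torch.randn_like(output))
    loss.backward()
    assert all(p.grad is not None for p in model.parameters() if p.requires_grad)

training_smoke_test(torch.nn.Linear(8, 4), torch.randn(2, 8))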
| 721 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def remove_last_block ( _lowerCamelCase ):
    string_list = re.split('(%s)' % '|'.join(A_ ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def main ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
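# --- Illustrative sketch (not part of the original script) ---
# Standalone demonstration of the stop-string truncation that
# remove_last_block performs on generated completions. EOF_STRINGS below is
# an assumed example value mirroring the module-level constant the script
# relies on, not the original definition.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\nif", "\nprint"]

def truncate_at_stop_string(completion):
    # Split on any stop string (kept in the result via the capture group),
    # then drop the final stop string and everything that follows it.
    parts = re.split('(%s)' % '|'.join(EOF_STRINGS), completion)
    return ''.join(parts[:-2])

assert truncate_at_stop_string('    return a + b\ndef unrelated():\n    pass') == '    return a + b'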
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class a_ ( snake_case_ ):
'''simple docstring'''
pass
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = data
lowerCamelCase__ : Optional[int] = None
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self
lowerCamelCase__ : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
lowerCamelCase__ : Optional[Any] = node.next_node
@property
def a__ (self ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
A_ : List[Any] = Node(1)
A_ : str = Node(2)
A_ : Optional[Any] = Node(3)
A_ : List[Any] = Node(4)
print(root_node.has_loop) # False
A_ : Tuple = root_node.next_node
print(root_node.has_loop) # True
A_ : List[Any] = Node(5)
A_ : Dict = Node(6)
A_ : List[str] = Node(5)
A_ : Any = Node(6)
print(root_node.has_loop) # False
A_ : Dict = Node(1)
print(root_node.has_loop) # False
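# --- Illustrative sketch (not part of the original file) ---
# Floyd's tortoise-and-hare cycle detection: a constant-memory alternative to
# the visited-list approach above. It assumes the same `next_node` attribute
# as the Node class in this file.
def has_loop_floyd(head) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:                 # the pointers meet only inside a cycle
            return True
    return False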
| 700 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
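# --- Illustrative sketch (not part of the original file) ---
# Minimal stand-in for the dummy-object pattern: the placeholder class lets
# imports succeed, but instantiation fails with a clear message about the
# missing backend. `backend_available` is an assumed helper, not a real API.
def requires_backends_sketch(obj, backends, backend_available=lambda b: False):
    missing = [b for b in backends if not backend_available(b)]
    if missing:
        raise ImportError(f'{type(obj).__name__} requires the {missing} backend(s)')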
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCamelCase_ ( _lowerCamelCase ):
create_state_space_tree(_lowerCamelCase , [] , 0 )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
A_ : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
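# --- Illustrative sketch (not part of the original file) ---
# The same exclude/include recursion, collecting results instead of printing;
# a sequence of length n always yields exactly 2**n subsequences.
def all_subsequences(sequence):
    collected = []

    def backtrack(index, current):
        if index == len(sequence):
            collected.append(current.copy())
            return
        backtrack(index + 1, current)       # exclude sequence[index]
        current.append(sequence[index])
        backtrack(index + 1, current)       # include sequence[index]
        current.pop()

    backtrack(0, [])
    return collected

assert len(all_subsequences([3, 1, 2, 4])) == 2**4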
| 701 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
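# --- Illustrative check (not part of the original file) ---
# Worked example of the factorial digit sum: 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
from math import factorial as _math_factorial

assert sum(int(digit) for digit in str(_math_factorial(10))) == 27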
| 696 | 0 |
"""simple docstring"""
import math
import sys
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = ""
try:
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as binary_file:
lowerCamelCase__ : List[Any] = binary_file.read()
for dat in data:
lowerCamelCase__ : List[str] = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = {"0": "0", "1": "1"}
lowerCamelCase__ : List[str] = "", ""
lowerCamelCase__ : Dict = len(SCREAMING_SNAKE_CASE_ )
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowerCamelCase__ : Optional[int] = lexicon[curr_string]
result += last_match_id
lowerCamelCase__ : int = last_match_id + "0"
        if math.log2(index ).is_integer():
lowerCamelCase__ : List[str] = {}
for curr_key in list(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ : str = lexicon.pop(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : int = new_lex
lowerCamelCase__ : Optional[Any] = last_match_id + "1"
index += 1
lowerCamelCase__ : List[Any] = ""
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = 8
try:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as opened_file:
lowerCamelCase__ : List[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(SCREAMING_SNAKE_CASE_ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowerCamelCase__ : Dict = data_bits[counter:]
lowerCamelCase__ : str = data_bits[counter + 1 :]
return data_bits
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Dict = read_file_binary(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : List[Any] = remove_prefix(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : List[Any] = decompress_data(SCREAMING_SNAKE_CASE_ )
write_file_binary(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
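# --- Illustrative check (not part of the original file) ---
# The byte <-> bit-string conversion used by the read/write helpers above is
# reversible: every byte becomes one fixed-width 8-bit field.
data = bytes([1, 255])
bits = ''.join(f'{byte:08b}' for byte in data)
assert bits == '0000000111111111'
assert bytes(int(bits[i : i + 8], 2) for i in range(0, len(bits), 8)) == data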
| 702 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
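# --- Illustrative check (not part of the original test file) ---
# Perceiver tokenization is byte-level: '€' encodes to three UTF-8 bytes
# (0xE2, 0x82, 0xAC), which with the tokenizer's offset of 6 for special
# tokens yields the ids 232, 136, 178 seen in the expected output above.
assert list('€'.encode('utf-8')) == [226, 130, 172]
assert [b + 6 for b in '€'.encode('utf-8')] == [232, 136, 178]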
| 696 | 0 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[str] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 42
lowerCamelCase__ : Any = 42
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 8, lowerCamelCase_ = 0.75 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = initial_block_size
lowerCamelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : List[str] = capacity_factor
lowerCamelCase__ : str = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return hash(lowerCamelCase_ ) % len(self._buckets )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self._buckets[ind]
if not stored:
lowerCamelCase__ : Union[str, Any] = _Item(lowerCamelCase_, lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : Any = _Item(lowerCamelCase_, lowerCamelCase_ )
return True
else:
return False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self._buckets
lowerCamelCase__ : List[str] = [None] * new_size
lowerCamelCase__ : Optional[Any] = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : int = self._get_next_ind(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
break
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_, lowerCamelCase_ )
def __delitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : int = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : Any = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
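# --- Illustrative usage (not part of the original file) ---
# Exercising the MutableMapping protocol of the hash map above. `HashMap` is
# an assumed exported name for the class (its __repr__ suggests so).
hash_map = HashMap()
for i in range(10):
    hash_map[i] = i * i        # crosses the 0.75 load factor and resizes up
assert hash_map[3] == 9
hash_map[3] = -1               # overwriting keeps the length unchanged
assert len(hash_map) == 10
del hash_map[3]
assert 3 not in hash_map and len(hash_map) == 9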
| 703 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
lowerCamelCase__ : List[Any] = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(lowerCamelCase_ ), torch_builtin(lowerCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(lowerCamelCase_ ), gelu_new(lowerCamelCase_ ) ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
lowerCamelCase__ : str = get_activation('gelu' )
lowerCamelCase__ : int = get_activation('gelu_10' )
lowerCamelCase__ : Optional[int] = torch_builtin(lowerCamelCase_ )
lowerCamelCase__ : Tuple = geluaa(lowerCamelCase_ )
lowerCamelCase__ : Dict = torch.where(y_gelu_aa < 10.0, 1, 0 )
self.assertTrue(torch.max(lowerCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_aa * clipped_mask ) )
def a__ (self ):
'''simple docstring'''
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(lowerCamelCase_ ):
get_activation('bogus' )
with self.assertRaises(lowerCamelCase_ ):
get_activation(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = get_activation('gelu' )
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Optional[Any] = get_activation('gelu' )
self.assertEqual(acta.a, 1 )
with self.assertRaises(lowerCamelCase_ ):
lowerCamelCase__ : str = acta.a
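# --- Illustrative sketch (not part of the original test file) ---
# The clipping behaviour the gelu_10 test checks, assumed here to be
# min(gelu(x), 10): identical to plain GELU below the clip value, flat above.
import torch

x = torch.tensor([-1.0, 0.0, 1.0, 100.0])
y_gelu = torch.nn.functional.gelu(x)
y_gelu_clipped = torch.clamp(y_gelu, max=10.0)
assert torch.equal(y_gelu_clipped[y_gelu < 10.0], y_gelu[y_gelu < 10.0])
assert y_gelu_clipped[-1].item() == 10.0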
| 704 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
        # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
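# --- Illustrative sketch (not part of the original test file) ---
# The scaling dict built in the parameterized RoPE test maps onto the config
# field; an assumed minimal construction (requires a transformers version
# with RoPE scaling support):
from transformers import GPTNeoXConfig

rope_config = GPTNeoXConfig(
    hidden_size=64,
    num_attention_heads=8,
    rope_scaling={'type': 'linear', 'factor': 10.0},
)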
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Optional[int] = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
A_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 705 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
            lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
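# --- Illustrative check (not part of the original script) ---
# What the checkpoint regex extracts from a markdown-style link in a config
# docstring:
import re as _re

_pattern = _re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
_doc = "The example checkpoint is [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
assert _pattern.findall(_doc) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]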
| 696 | 0 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
A_ : Optional[Any] = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
A_ : str = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
A_ : Optional[int] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ), reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
], )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(snake_case__, snake_case__, sample_weight=snake_case__ ) ),
}
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
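# --- Illustrative sketch (not part of the original file) ---
# Minimal stand-in for the lazy-module pattern used above: attribute access
# triggers the real submodule import only on first use.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported class name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module('.' + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)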
| 696 | 0 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
try:
lowerCamelCase__ : Any = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowerCamelCase__ : int = default
else:
# KEY is set, convert it to True or False.
try:
lowerCamelCase__ : str = strtobool(lowerCAmelCase_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
A_ : str = parse_flag_from_env("RUN_SLOW", default=False)
A_ : List[str] = parse_flag_from_env("RUN_REMOTE", default=False)
A_ : List[str] = parse_flag_from_env("RUN_LOCAL", default=True)
A_ : Optional[Any] = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.0: 'pip install \"soundfile>=0.12.0\"'; ",
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case ):
    '''simple docstring'''
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss' )(test_case )
    return test_case

def require_regex(test_case ):
    '''simple docstring'''
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex' )(test_case )
    return test_case

def require_elasticsearch(test_case ):
    '''simple docstring'''
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch' )(test_case )
    return test_case

def require_sqlalchemy(test_case ):
    '''simple docstring'''
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy' )(test_case )
    return test_case
def require_torch(test_case ):
    '''simple docstring'''
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch' )(test_case )
    return test_case

def require_tf(test_case ):
    '''simple docstring'''
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow' )(test_case )
    return test_case

def require_jax(test_case ):
    '''simple docstring'''
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX' )(test_case )
    return test_case

def require_pil(test_case ):
    '''simple docstring'''
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow' )(test_case )
    return test_case
def require_transformers(test_case ):
    '''simple docstring'''
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers' )(test_case )
    else:
        return test_case

def require_tiktoken(test_case ):
    '''simple docstring'''
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken' )(test_case )
    else:
        return test_case

def require_spacy(test_case ):
    '''simple docstring'''
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy' )(test_case )
    else:
        return test_case
def require_spacy_model(model ):
    '''simple docstring'''

    def _require_spacy_model(test_case ):
        try:
            import spacy  # noqa F401

            spacy.load(model )
        except ImportError:
            return unittest.skip('test requires spacy' )(test_case )
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model ) )(test_case )
        else:
            return test_case

    return _require_spacy_model

def require_pyspark(test_case ):
    '''simple docstring'''
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark' )(test_case )
    else:
        return test_case

def require_joblibspark(test_case ):
    '''simple docstring'''
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark' )(test_case )
    else:
        return test_case
def slow(test_case ):
    '''simple docstring'''
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow' )(test_case )
    return test_case

def local(test_case ):
    '''simple docstring'''
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local' )(test_case )
    return test_case

def packaged(test_case ):
    '''simple docstring'''
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged' )(test_case )
    return test_case

def remote(test_case ):
    '''simple docstring'''
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote' )(test_case )
    return test_case

def for_all_test_methods(*decorators ):
    '''simple docstring'''

    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith('test' ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls, name, fn )
        return cls

    return decorate
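# Illustrative sketch (hypothetical test class, not from the original file):
# `for_all_test_methods` applies each decorator to every `test_*` method at class
# definition time, e.g.:
#
#   @for_all_test_methods(slow, require_faiss)
#   class FaissIndexTest(unittest.TestCase):
#       def test_search(self):
#           ...  # runs only with RUN_SLOW=yes and faiss installed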
class RequestWouldHangIndefinitelyError(Exception):
    '''simple docstring'''
    pass

class OfflineSimulationMode(Enum):
    '''simple docstring'''

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2

@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16 ):
    '''simple docstring'''
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout' ) is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', f'''OfflineMock[{url}]''' ),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs ):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True ):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
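# Usage sketch (added): simulate a network outage around any block of code.
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       requests.get("https://huggingface.co")  # raises requests.ConnectionError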
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs ):
    '''simple docstring'''
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args, **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
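# Usage sketch (added): run a block of code from inside a throwaway directory,
# restoring the original working directory even if the block raises.
#
#   with set_current_working_directory_to_temp_dir():
#       Path("scratch.txt").write_text("temporary")  # written to the tmp dir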
@contextmanager
def assert_arrow_memory_increases():
    '''simple docstring'''
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."

@contextmanager
def assert_arrow_memory_doesnt_increase():
    '''simple docstring'''
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."

def is_rng_equal(rng1, rng2 ):
    '''simple docstring'''
    return deepcopy(rng1 ).integers(0, 100, 10 ).tolist() == deepcopy(rng2 ).integers(0, 100, 10 ).tolist()
def xfail_if_500_502_http_error(func ):
    '''simple docstring'''
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs ):
        try:
            return func(*args, **kwargs )
        except HTTPError as err:
            if str(err ).startswith('500' ) or str(err ).startswith('502' ):
                pytest.xfail(str(err ) )
            raise err

    return decorator.decorator(_wrapper, func )
class _RunOutput:
    '''simple docstring'''

    def __init__(self, returncode, stdout, stderr ):
        '''simple docstring'''
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr

async def _read_stream(stream, callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False ):
    '''simple docstring'''
    if echo:
        print('\nRunning: ', ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label, line, file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='stdout:' ) ),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='stderr:' ) ),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err )
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True ):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
    return result
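# Usage sketch (added): run a worker script and inspect its captured output;
# `stdout`/`stderr` on the returned _RunOutput are lists of decoded lines.
#
#   result = execute_subprocess_async([sys.executable, "-c", "print('ok')"])
#   assert result.returncode == 0 and result.stdout[0] == "ok"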
def pytest_xdist_worker_id():
    '''simple docstring'''
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0' )
    worker = re.sub(r'^gw', '', worker, 0, re.M )
    return int(worker )

def get_torch_dist_unique_port():
    '''simple docstring'''
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
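# Usage sketch (added): under pytest-xdist each worker (gw0, gw1, ...) resolves to
# a distinct port (29500 + worker id), so parallel torch.distributed launches on
# one machine don't collide, e.g.:
#
#   cmd = f"torchrun --master_port={get_torch_dist_unique_port()} train.py".split()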
| 707 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(1_00_00):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 0 |
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
A_ : Dict = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    '''simple docstring'''

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs ):
        '''simple docstring'''
        super().__init__(*args, **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 1_2_8  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None ):
        '''simple docstring'''
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('Trainer: calibration requires an calib_dataset.' )
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset, description='Calibration' )
        return DataLoader(
            calib_dataset, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory, shuffle=True, )
    def calibrate(self, calib_dataset=None ):
        '''simple docstring'''
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset )
        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True )
        model.eval()
        quant_trainer.enable_calibration(model )
        logger.info('***** Running calibration *****' )
        logger.info(f'''  Num examples = {self.calib_num}''' )
        logger.info(f'''  Batch size = {calib_dataloader.batch_size}''' )
        for step, inputs in enumerate(calib_dataloader ):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(model, self.quant_trainer_args )
        self.model = model
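    # Usage sketch (added; argument names are hypothetical): a typical INT8
    # quantization-aware flow with this trainer is to calibrate first, then
    # evaluate with the quantizers enabled:
    #
    #   trainer = QuestionAnsweringTrainer(model=model, args=args, quant_trainer_args=qta)
    #   trainer.calibrate()          # streams self.calib_num samples through the model
    #   metrics = trainer.evaluate()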
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval" ):
        '''simple docstring'''
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f'''{metric_key_prefix}_''' ):
                    metrics[f'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
            self.log(metrics )
        else:
            metrics = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics )
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test" ):
        '''simple docstring'''
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f'''{metric_key_prefix}_''' ):
                metrics[f'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics )
    def save_onnx(self, output_dir="./" ):
        '''simple docstring'''
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        batch = next(iter(eval_dataloader ) )
        # saving device - to make it consistent
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
        # convert to tuple
        input_tuple = tuple(v.to(device ) for k, v in batch.items() )
        logger.info('Converting model to be onnx compatible' )
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True
        model = self.model.to(device )
        model.eval()
        model.float()
        model_to_save = model.module if hasattr(model, 'module' ) else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args )
        output_model_file = os.path.join(output_dir, 'model.onnx' )
        logger.info(f'''exporting model to {output_model_file}''' )
        axes = {0: 'batch_size', 1: 'seq_len'}
        torch.onnx.export(
            model_to_save, input_tuple, output_model_file, export_params=True, opset_version=1_3, do_constant_folding=True, input_names=['input_ids', 'attention_mask', 'token_type_ids'], output_names=['output_start_logits', 'output_end_logits'], dynamic_axes={
                'input_ids': axes,
                'attention_mask': axes,
                'token_type_ids': axes,
                'output_start_logits': axes,
                'output_end_logits': axes,
            }, verbose=True, )
logger.info('onnx export finished' )
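# Follow-up sketch (added; assumes onnxruntime is installed): the exported graph
# can then be loaded for inference, e.g.:
#
#   import onnxruntime as ort
#   session = ort.InferenceSession("model.onnx")
#   start, end = session.run(None, {"input_ids": ids, "attention_mask": mask, "token_type_ids": tt})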
| 708 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
    def test_download_only_flax_weights(self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None, cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname )[0], 'snapshots' ) )]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
    def test_flax_pipeline_tiny_model(self ):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None )
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed, num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True ).images
        assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 4.1_514_745 ) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32 ).sum() - 49_947.875 ) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(images_pil ) == num_samples
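    # Note (added sketch): `jit=True` runs the pipeline under `jax.pmap`, so params
    # are replicated across devices while inputs and RNG keys are split per device:
    #
    #   params = replicate(params)                       # copy weights to every device
    #   rng = jax.random.split(rng, jax.device_count())  # one PRNG key per device
    #   prompt_ids = shard(prompt_ids)                   # [num_devices, batch/device, ...]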
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 696 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 2_56,
}
class RemBertTokenizer(PreTrainedTokenizer ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        '''simple docstring'''
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size(self ):
        '''simple docstring'''
        return len(self.sp_model )

    def get_vocab(self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d ):
        '''simple docstring'''
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )

    def _tokenize(self, text, sample=False ):
        '''simple docstring'''
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces

    def _convert_token_to_id(self, token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token(self, index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string(self, tokens ):
        '''simple docstring'''
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
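# Usage sketch (added; the checkpoint name comes from the pretrained map above):
#
#   tok = RemBertTokenizer.from_pretrained("google/rembert")
#   enc = tok("Hello world")  # adds [CLS] ... [SEP] via build_inputs_with_special_tokens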
| 709 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser ):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )

def pytest_terminal_summary(terminalreporter ):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports )
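# Usage sketch (added): with this conftest on the path, an invocation such as
#
#   python -m pytest --make-reports=my_run tests/
#
# routes the flag through `pytest_addoption_shared` and, at the end of the run,
# `pytest_terminal_summary_main` writes the per-run report files.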
| 710 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    '''simple docstring'''

    def forward(self, input_ids, token_type_ids, attention_mask ):
        '''simple docstring'''
        return None

class FuncNonContiguousArgs:
    '''simple docstring'''

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask ):
        '''simple docstring'''
        return None

class OnnxExportTestCase(unittest.TestCase ):
    '''simple docstring'''

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ('bert-base-cased', {}),
        ('gpt2', {'use_cache': False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'tf', 1_2, **model_kwargs )

    @require_torch
    @slow
    def test_export_pytorch(self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'pt', 1_2, **model_kwargs )

    @require_torch
    @slow
    def test_export_custom_bert_model(self ):
        '''simple docstring'''
        from transformers import BertModel

        words = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t' ) as vocab_file:
            vocab_file.write('\n'.join(words ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(words ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir, 'pt', 1_2, tokenizer )
    @require_tf
    @slow
    def test_quantize_tf(self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'tf', 1_2, **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )

    @require_torch
    @slow
    def test_quantize_pytorch(self ):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'pt', 1_2, **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs ):
        '''simple docstring'''
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('model.onnx' )
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs )
                return path
        except Exception as e:
            self.fail(e )
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self ):
        '''simple docstring'''
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model, tokenizer, 'pt' )

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self ):
        '''simple docstring'''
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model, tokenizer, 'tf' )

    def _test_infer_dynamic_axis(self, model, tokenizer, framework ):
        '''simple docstring'''
        nlp = FeatureExtractionPipeline(model, tokenizer )
        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ), len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3], input_vars )
        self.assertSequenceEqual(variable_names[3:], output_vars )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
        self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
    def test_ensure_valid_input(self ):
        '''simple docstring'''
        valid_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, valid_input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ), 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ), set(valid_input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, valid_input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ), 1 )
        self.assertEqual(len(ordered_input_names ), 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens['input_ids'] )
        self.assertEqual(ordered_input_names[0], 'input_ids' )

    def test_generate_identified_filename(self ):
        '''simple docstring'''
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
        self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc, x_start, x_end, steps = 100, ):
    '''simple docstring'''
    xa = x_start
    fxa = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next )
        area += abs(fxa + fxa_next ) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area

if __name__ == "__main__":

    def f(x ):
        '''simple docstring'''
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 10_00_00:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
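    # Sanity-check note (added): the exact integral of x^3 + x^2 over [-5, 5] is
    # 250/3 ≈ 83.33 (the odd x^3 term cancels), which the printed approximations
    # converge to as the step count grows.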
| 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''

    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    required_optional_params = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self ):
        '''simple docstring'''
        return 3_2

    @property
    def time_input_dim(self ):
        '''simple docstring'''
        return 3_2

    @property
    def block_out_channels_a(self ):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def time_embed_dim(self ):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self ):
        '''simple docstring'''
        return 1_0_0

    @property
    def dummy_unet(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs )
        return model

    @property
    def dummy_movq_kwargs(self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components(self ):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            'num_train_timesteps': 1_0_0_0,
            'beta_schedule': 'linear',
            'beta_start': 0.00_085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0, 2, 3, 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
        # create hint
        hint = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
    def test_kandinsky_controlnet_img2img(self ):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase ):
    '''simple docstring'''

    def tearDown(self ):
        '''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self ):
        '''simple docstring'''
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        init_image = init_image.resize((5_1_2, 5_1_2) )
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png' )
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2, 0, 1 ).unsqueeze(0 )
        prompt = 'A robot, 4k photo'
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt='', ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
        image = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(image, expected_image )
| 696 | 0 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
A_ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline ):
    '''simple docstring'''

    def __init__(self, segmentation_model, segmentation_processor, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, ):
        '''simple docstring'''
        super().__init__()
        if hasattr(scheduler.config, 'steps_offset' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
                f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
                'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
                ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
                ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
                ' file'
            )
            deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['steps_offset'] = 1
            scheduler._internal_dict = FrozenDict(new_config )
        if hasattr(scheduler.config, 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
                ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
                ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
                ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
                ' Hub, it would be very nice if you could open a Pull request for the'
                ' `scheduler/scheduler_config.json` file'
            )
            deprecate('skip_prk_steps not set', '1.0.0', deprecation_message, standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['skip_prk_steps'] = True
            scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=__a, segmentation_processor=__a, vae=__a, text_encoder=__a, tokenizer=__a, unet=__a, scheduler=__a, safety_checker=__a, feature_extractor=__a, )
def a__ (self, lowerCamelCase_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase__ : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def a__ (self ):
'''simple docstring'''
self.enable_attention_slicing(__a )
def a__ (self ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ : List[str] = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__a, __a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ (self ):
'''simple docstring'''
if self.device != torch.device('meta' ) or not hasattr(self.unet, '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a, '_hf_hook' )
and hasattr(module._hf_hook, 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 5_1_2, lowerCamelCase_ = 5_1_2, lowerCamelCase_ = 5_0, lowerCamelCase_ = 7.5, lowerCamelCase_ = None, lowerCamelCase_ = 1, lowerCamelCase_ = 0.0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = "pil", lowerCamelCase_ = True, lowerCamelCase_ = None, lowerCamelCase_ = 1, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Any = self.segmentation_processor(
text=[text], images=[image], padding='max_length', return_tensors='pt' ).to(self.device )
lowerCamelCase__ : str = self.segmentation_model(**__a )
lowerCamelCase__ : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase__ : Optional[int] = self.numpy_to_pil(__a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase__ : Dict = StableDiffusionInpaintPipeline(
vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, )
return inpainting_pipeline(
prompt=__a, image=__a, mask_image=__a, height=__a, width=__a, num_inference_steps=__a, guidance_scale=__a, negative_prompt=__a, num_images_per_prompt=__a, eta=__a, generator=__a, latents=__a, output_type=__a, return_dict=__a, callback=__a, callback_steps=__a, )
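# Hedged usage sketch (added for illustration): how a CLIPSeg-driven text
# inpainting pipeline like the class above is typically assembled. The
# checkpoint ids and the `custom_pipeline` name follow the diffusers community
# examples and are assumptions, not values taken from this file.
def _example_text_inpainting_usage():
    from PIL import Image
    from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
    seg_processor = CLIPSegProcessor.from_pretrained('CIDAS/clipseg-rd64-refined' )
    seg_model = CLIPSegForImageSegmentation.from_pretrained('CIDAS/clipseg-rd64-refined' )
    pipe = DiffusionPipeline.from_pretrained(
        'runwayml/stable-diffusion-inpainting', custom_pipeline='text_inpainting', segmentation_model=seg_model, segmentation_processor=seg_processor, )
    init_image = Image.new('RGB', (5_1_2, 5_1_2) )  # stand-in for a real photo
    # `text` selects the region to mask out, `prompt` describes the replacement
    return pipe(text='a couch', image=init_image, prompt='a red couch' ).images[0]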
| 712 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
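# Hedged illustration (added): tables like A_ above are typically consumed by
# resolving package names into pip requirement strings; the helper name below
# is an assumption for demonstration only.
def _deps_as_requirements(names):
    # e.g. _deps_as_requirements(["numpy", "torch"])
    #   -> ["numpy>=1.17", "torch>=1.9,!=1.12.0"]
    return [A_[name] for name in names]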
| 696 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = UNetaDModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.dummy_uncond_unet
lowerCamelCase__ : List[str] = ScoreSdeVeScheduler()
lowerCamelCase__ : Any = ScoreSdeVePipeline(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
sde_ve.to(lowerCamelCase_ )
sde_ve.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Tuple = torch.manual_seed(0 )
lowerCamelCase__ : Tuple = sde_ve(num_inference_steps=2, output_type='numpy', generator=lowerCamelCase_ ).images
lowerCamelCase__ : str = torch.manual_seed(0 )
lowerCamelCase__ : int = sde_ve(num_inference_steps=2, output_type='numpy', generator=lowerCamelCase_, return_dict=lowerCamelCase_ )[
0
]
lowerCamelCase__ : Dict = image[0, -3:, -3:, -1]
lowerCamelCase__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase__ : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = """google/ncsnpp-church-256"""
lowerCamelCase__ : Union[str, Any] = UNetaDModel.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = ScoreSdeVeScheduler.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = ScoreSdeVePipeline(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
sde_ve.to(lowerCamelCase_ )
sde_ve.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = sde_ve(num_inference_steps=1_0, output_type='numpy', generator=lowerCamelCase_ ).images
lowerCamelCase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
lowerCamelCase__ : Union[str, Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
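# Hedged sketch (added): the same checkpoint outside the test harness. It
# assumes the hub repo ships a full pipeline config; the slow test above
# instead assembles the pipeline from the UNet and scheduler by hand.
def _example_sde_ve_inference():
    pipe = ScoreSdeVePipeline.from_pretrained('google/ncsnpp-church-256' )
    pipe.to(torch_device )
    return pipe(num_inference_steps=1_0, output_type='numpy' ).images[0]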
| 713 |
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256 as shaaaa  # the shared secret is hashed with SHA-256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
# alias matching the name used inside the class below
primes = A_
class DiffieHellman:
    '''simple docstring'''
    def __init__(self, group = 1_4 ):
        '''simple docstring'''
        if group not in primes:
            raise ValueError('Unsupported Group' )
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        # 32 random bytes (256 bits) as the private exponent
        self.__private_key = int(hexlify(urandom(3_2 ) ), base=1_6 )
    def get_private_key(self ):
        '''simple docstring'''
        return hex(self.__private_key )[2:]
    def generate_public_key(self ):
        '''simple docstring'''
        public_key = pow(self.generator, self.__private_key, self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key(self, key ):
        '''simple docstring'''
        # range check plus Euler's criterion: accept only quadratic residues
        # mod p, which rules out degenerate values such as 1 and p - 1
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime ) == 1
        )
    def generate_shared_key(self, other_key_str ):
        '''simple docstring'''
        other_key = int(other_key_str, base=1_6 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('Invalid public key' )
        shared_key = pow(other_key, self.__private_key, self.prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str, prime ):
        '''simple docstring'''
        # same validity check as above, usable without an instance
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime ) == 1
        )
    @staticmethod
    def find_shared_key_static(local_private_key_str, remote_public_key_str, group = 1_4 ):
        '''simple docstring'''
        local_private_key = int(local_private_key_str, base=1_6 )
        remote_public_key = int(remote_public_key_str, base=1_6 )
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime ):
            raise ValueError('Invalid public key' )
        shared_key = pow(remote_public_key, local_private_key, prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
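# Usage sketch (added for illustration): a complete two-party exchange with the
# class above; both sides must derive the same SHA-256 hex digest.
def _example_key_exchange():
    alice, bob = DiffieHellman(group=1_4 ), DiffieHellman(group=1_4 )
    shared_a = alice.generate_shared_key(bob.generate_public_key() )
    shared_b = bob.generate_shared_key(alice.generate_public_key() )
    assert shared_a == shared_b
    return shared_a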
| 696 | 0 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    '''simple docstring'''
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'
A_ : Optional[int] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class a_ ( ProcessorMixin ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ['image_processor', 'char_tokenizer']
lowerCamelCase__ : int = 'ViTImageProcessor'
lowerCamelCase__ : Any = 'MgpstrTokenizer'
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', UpperCamelCase_, )
lowerCamelCase__ : Any = kwargs.pop('feature_extractor' )
lowerCamelCase__ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
lowerCamelCase__ : Optional[Any] = tokenizer
lowerCamelCase__ : Tuple = AutoTokenizer.from_pretrained('gpt2' )
lowerCamelCase__ : int = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(UpperCamelCase_, UpperCamelCase_ )
def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowerCamelCase__ : Tuple = self.image_processor(UpperCamelCase_, return_tensors=UpperCamelCase_, **UpperCamelCase_ )
if text is not None:
lowerCamelCase__ : Optional[int] = self.char_tokenizer(UpperCamelCase_, return_tensors=UpperCamelCase_, **UpperCamelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase__ : List[str] = encodings['input_ids']
return inputs
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = sequences
lowerCamelCase__ : str = char_preds.size(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self._decode_helper(UpperCamelCase_, 'char' )
lowerCamelCase__ , lowerCamelCase__ : Any = self._decode_helper(UpperCamelCase_, 'bpe' )
lowerCamelCase__ , lowerCamelCase__ : Dict = self._decode_helper(UpperCamelCase_, 'wp' )
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Tuple = []
for i in range(UpperCamelCase_ ):
lowerCamelCase__ : Optional[int] = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCamelCase__ : List[str] = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCamelCase__ : List[Any] = scores.index(max(UpperCamelCase_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCamelCase__ : Optional[Any] = {}
lowerCamelCase__ : int = final_strs
lowerCamelCase__ : Tuple = final_scores
lowerCamelCase__ : List[str] = char_strs
lowerCamelCase__ : Dict = bpe_strs
lowerCamelCase__ : Optional[Any] = wp_strs
return out
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCamelCase__ : Tuple = self.char_decode
lowerCamelCase__ : Union[str, Any] = 1
lowerCamelCase__ : str = '[s]'
elif format == DecodeType.BPE:
lowerCamelCase__ : List[str] = self.bpe_decode
lowerCamelCase__ : Union[str, Any] = 2
lowerCamelCase__ : str = '#'
elif format == DecodeType.WORDPIECE:
lowerCamelCase__ : Optional[int] = self.wp_decode
lowerCamelCase__ : Any = 1_0_2
lowerCamelCase__ : List[Any] = '[SEP]'
else:
raise ValueError(f'''Format {format} is not supported.''' )
lowerCamelCase__ , lowerCamelCase__ : List[str] = [], []
lowerCamelCase__ : List[Any] = pred_logits.size(0 )
lowerCamelCase__ : Optional[Any] = pred_logits.size(1 )
lowerCamelCase__ , lowerCamelCase__ : Tuple = pred_logits.topk(1, dim=-1, largest=UpperCamelCase_, sorted=UpperCamelCase_ )
lowerCamelCase__ : List[str] = preds_index.view(-1, UpperCamelCase_ )[:, 1:]
lowerCamelCase__ : List[str] = decoder(UpperCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = torch.nn.functional.softmax(UpperCamelCase_, dim=2 ).max(dim=2 )
lowerCamelCase__ : Union[str, Any] = preds_max_prob[:, 1:]
for index in range(UpperCamelCase_ ):
lowerCamelCase__ : Optional[int] = preds_str[index].find(UpperCamelCase_ )
lowerCamelCase__ : Any = preds_str[index][:pred_eos]
lowerCamelCase__ : str = preds_index[index].cpu().tolist()
lowerCamelCase__ : Tuple = pred_index.index(UpperCamelCase_ ) if eos_token in pred_index else -1
lowerCamelCase__ : Any = preds_max_prob[index][: pred_eos_index + 1]
lowerCamelCase__ : str = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(UpperCamelCase_ )
conf_scores.append(UpperCamelCase_ )
return dec_strs, conf_scores
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase_ )]
return decode_strs
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(UpperCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase_ )]
return decode_strs
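# Hedged usage sketch (added): end-to-end scene-text recognition with this
# processor. The checkpoint id and the output key follow the upstream MGP-STR
# example and are assumptions here.
def _example_mgpstr_usage(image):
    from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
    processor = MgpstrProcessor.from_pretrained('alibaba-damo/mgp-str-base' )
    model = MgpstrForSceneTextRecognition.from_pretrained('alibaba-damo/mgp-str-base' )
    pixel_values = processor(images=image, return_tensors='pt' ).pixel_values
    outputs = model(pixel_values )
    # batch_decode picks, per sample, the best of the char/bpe/wp heads
    return processor.batch_decode(outputs.logits )['generated_text']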
| 714 |
"""simple docstring"""
def lowerCamelCase_ ( mass , velocity ):
    # kinetic energy E_k = 0.5 * m * |v|^2; abs() makes the sign of v irrelevant
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
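# Worked examples (added) for the kinetic-energy formula above:
#   lowerCamelCase_(10, 10) -> 0.5 * 10 * 10 * 10 = 500.0
#   lowerCamelCase_(2, -3)  -> 0.5 * 2 * 3 * 3 = 9.0 (speed enters as |v|)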
| 696 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester( ConfigTester ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase_, 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(lowerCamelCase_, 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(lowerCamelCase_, 'num_encoder_blocks' ) )
class SegformerModelTester :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=6_4, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=[2, 2, 2, 2], lowerCamelCase_=[8, 4, 2, 1], lowerCamelCase_=[1_6, 3_2, 6_4, 1_2_8], lowerCamelCase_=[1, 4, 8, 1_6], lowerCamelCase_=[1, 2, 4, 8], lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Tuple = image_size
lowerCamelCase__ : int = num_channels
lowerCamelCase__ : Optional[int] = num_encoder_blocks
lowerCamelCase__ : Optional[Any] = sr_ratios
lowerCamelCase__ : Any = depths
lowerCamelCase__ : Dict = hidden_sizes
lowerCamelCase__ : Union[str, Any] = downsampling_rates
lowerCamelCase__ : str = num_attention_heads
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = initializer_range
lowerCamelCase__ : int = num_labels
lowerCamelCase__ : Union[str, Any] = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : List[Any] = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
lowerCamelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = SegformerModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : List[str] = SegformerForSemanticSegmentation(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss, 0.0 )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : List[Any] = SegformerForSemanticSegmentation(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertGreater(result.loss, 0.0 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.prepare_config_and_inputs()
lowerCamelCase__ : Any = config_and_inputs
lowerCamelCase__ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Union[str, Any] = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : str = False
lowerCamelCase__ : Any = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = SegformerModelTester(self )
lowerCamelCase__ : Union[str, Any] = SegformerConfigTester(self, config_class=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowerCamelCase_ )
@unittest.skip('SegFormer does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = model_class(lowerCamelCase_ )
lowerCamelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
lowerCamelCase__ : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = True
lowerCamelCase__ : int = False
lowerCamelCase__ : str = True
lowerCamelCase__ : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Dict = outputs.attentions
lowerCamelCase__ : Optional[int] = sum(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Optional[int] = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
# verify the first attentions (first block, first layer)
lowerCamelCase__ : Optional[int] = (self.model_tester.image_size // 4) ** 2
lowerCamelCase__ : Dict = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
# verify the last attentions (last block, last layer)
lowerCamelCase__ : Tuple = (self.model_tester.image_size // 3_2) ** 2
lowerCamelCase__ : Optional[int] = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )
lowerCamelCase__ : Optional[Any] = len(lowerCamelCase_ )
# Check attention is always last and order is fine
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : List[str] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
self.assertEqual(out_len + 1, len(lowerCamelCase_ ) )
lowerCamelCase__ : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
# verify the first attentions (first block, first layer)
lowerCamelCase__ : Dict = (self.model_tester.image_size // 4) ** 2
lowerCamelCase__ : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
def a__ (self ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : List[str] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : str = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Dict = outputs.hidden_states
lowerCamelCase__ : int = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ), [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
], )
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : str = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase_ ):
continue
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
lowerCamelCase__ : List[Any] = self._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
lowerCamelCase__ : Any = model(**lowerCamelCase_ ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def a__ (self ):
'''simple docstring'''
pass
@slow
def a__ (self ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[str] = SegformerModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2), keep_ratio=lowerCamelCase_, align=lowerCamelCase_, do_random_crop=lowerCamelCase_ )
lowerCamelCase__ : str = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
lowerCamelCase_ )
lowerCamelCase__ : str = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(images=lowerCamelCase_, return_tensors='pt' )
lowerCamelCase__ : Optional[Any] = encoded_inputs.pixel_values.to(lowerCamelCase_ )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : int = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], lowerCamelCase_, atol=1e-4 ) )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2), keep_ratio=lowerCamelCase_, align=lowerCamelCase_, do_random_crop=lowerCamelCase_ )
lowerCamelCase__ : Dict = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : List[str] = image_processor(images=lowerCamelCase_, return_tensors='pt' )
lowerCamelCase__ : Any = encoded_inputs.pixel_values.to(lowerCamelCase_ )
with torch.no_grad():
lowerCamelCase__ : Any = model(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Dict = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], lowerCamelCase_, atol=1e-1 ) )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2), keep_ratio=lowerCamelCase_, align=lowerCamelCase_, do_random_crop=lowerCamelCase_ )
lowerCamelCase__ : Dict = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
lowerCamelCase_ )
lowerCamelCase__ : Tuple = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=lowerCamelCase_, return_tensors='pt' )
lowerCamelCase__ : int = encoded_inputs.pixel_values.to(lowerCamelCase_ )
with torch.no_grad():
lowerCamelCase__ : List[str] = model(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.logits.detach().cpu()
lowerCamelCase__ : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_, target_sizes=[(5_0_0, 3_0_0)] )
lowerCamelCase__ : Union[str, Any] = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape, lowerCamelCase_ )
lowerCamelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ )
lowerCamelCase__ : List[str] = torch.Size((1_2_8, 1_2_8) )
self.assertEqual(segmentation[0].shape, lowerCamelCase_ )
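# Hedged sketch (added): plain inference mirroring the slow tests above,
# without the unittest harness; `image` is expected to be a PIL image.
def _example_segformer_inference(image):
    processor = SegformerImageProcessor.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' )
    model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' )
    inputs = processor(images=image, return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs )  # logits: (batch, num_labels, H/4, W/4)
    # upsample the class predictions back to the input resolution
    return processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]] )[0]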
| 715 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # Check that the fake head request was indeed called
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 696 | 0 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# matplotlib is required by the plotting helpers (plot_pr_curve and the
# no-answer histogram) defined below
import matplotlib.pyplot as plt
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
    parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
    parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
    parser.add_argument(
        '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
    parser.add_argument(
        '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
    parser.add_argument(
        '--na-prob-thresh' , '-t' , type=float , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
    parser.add_argument(
        '--out-image-dir' , '-p' , metavar='out_images' , default=None , help='Save precision-recall curves to directory.' )
    parser.add_argument('--verbose' , '-v' , action='store_true' )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans( dataset ):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'] )
    return qid_to_has_ans
def normalize_answer( text ):
    def remove_articles(text ):
        return ARTICLES_REGEX.sub(' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def get_tokens( s ):
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact( a_gold , a_pred ):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa( a_gold , a_pred ):
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
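# Worked example (added) for the token-overlap F1 above: after normalization,
# gold 'the cat sat' -> ['cat', 'sat'] (article dropped) and pred
# 'cat sat down' -> ['cat', 'sat', 'down'], so num_same = 2,
# precision = 2/3, recall = 1.0 and F1 = 2 * (2/3) * 1.0 / (2/3 + 1.0) = 0.8.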
def get_raw_scores( dataset , preds ):
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def apply_no_ans_threshold( scores , na_probs , qid_to_has_ans , na_prob_thresh ):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            # abstaining is correct exactly when the question is unanswerable
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict( exact_scores , fa_scores , qid_list=None ):
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values() ) / total),
                ('f1', 100.0 * sum(fa_scores.values() ) / total),
                ('total', total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ('total', total),
            ] )
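# Worked example (added): for exact scores {'q1': 1, 'q2': 0} and F1 scores
# {'q1': 1.0, 'q2': 0.8} with no qid filter, make_eval_dict returns
# OrderedDict([('exact', 50.0), ('f1', 90.0), ('total', 2)]).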
def merge_eval( main_eval , new_eval , prefix ):
    for k in new_eval:
        main_eval[f'''{prefix}_{k}'''] = new_eval[k]
def plot_pr_curve( precisions , recalls , out_image , title ):
    plt.step(recalls , precisions , color='b' , alpha=0.2 , where='post' )
    plt.fill_between(recalls , precisions , step='post' , alpha=0.2 , color='b' )
    plt.xlabel('Recall' )
    plt.ylabel('Precision' )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
lowerCamelCase__ : Optional[Any] = sorted(a__ , key=lambda _lowerCamelCase : na_probs[k] )
lowerCamelCase__ : Any = 0.0
lowerCamelCase__ : List[Any] = 1.0
lowerCamelCase__ : Union[str, Any] = 0.0
lowerCamelCase__ : str = [1.0]
lowerCamelCase__ : Dict = [0.0]
lowerCamelCase__ : int = 0.0
for i, qid in enumerate(a__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCamelCase__ : List[Any] = true_pos / float(i + 1 )
lowerCamelCase__ : Dict = true_pos / float(a__ )
if i == len(a__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(a__ )
recalls.append(a__ )
if out_image:
plot_pr_curve(a__ , a__ , a__ , a__ )
return {"ap": 100.0 * avg_prec}
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if out_image_dir and not os.path.exists(a__ ):
os.makedirs(a__ )
lowerCamelCase__ : Union[str, Any] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
lowerCamelCase__ : Dict = make_precision_recall_eval(
a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
lowerCamelCase__ : Optional[int] = make_precision_recall_eval(
a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
lowerCamelCase__ : List[str] = {k: float(a__ ) for k, v in qid_to_has_ans.items()}
lowerCamelCase__ : Dict = make_precision_recall_eval(
a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(a__ , a__ , 'pr_exact' )
merge_eval(a__ , a__ , 'pr_f1' )
merge_eval(a__ , a__ , 'pr_oracle' )
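# Plot a histogram of predicted no-answer probabilities for a subset of questions.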
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if not qid_list:
return
lowerCamelCase__ : Dict = [na_probs[k] for k in qid_list]
lowerCamelCase__ : int = np.ones_like(a__ ) / float(len(a__ ) )
plt.hist(a__ , weights=a__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(a__ , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
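# Scan candidate no-answer thresholds (questions sorted by null probability) and
# return the best achievable score together with the threshold that attains it.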
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Tuple = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
lowerCamelCase__ : str = num_no_ans
lowerCamelCase__ : str = cur_score
lowerCamelCase__ : Union[str, Any] = 0.0
lowerCamelCase__ : Tuple = sorted(a__ , key=lambda _lowerCamelCase : na_probs[k] )
for i, qid in enumerate(a__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCamelCase__ : List[Any] = scores[qid]
else:
if preds[qid]:
lowerCamelCase__ : Union[str, Any] = -1
else:
lowerCamelCase__ : int = 0
cur_score += diff
if cur_score > best_score:
lowerCamelCase__ : Any = cur_score
lowerCamelCase__ : Union[str, Any] = na_probs[qid]
return 100.0 * best_score / len(a__ ), best_thresh
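# Find and record the best no-answer thresholds for both exact-match and F1.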
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : str = find_best_thresh(a__ , a__ , a__ , a__ )
lowerCamelCase__ , lowerCamelCase__ : int = find_best_thresh(a__ , a__ , a__ , a__ )
lowerCamelCase__ : str = best_exact
lowerCamelCase__ : Optional[Any] = exact_thresh
lowerCamelCase__ : int = best_fa
lowerCamelCase__ : Optional[int] = fa_thresh
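# Entry point: load the dataset and predictions, score them, optionally tune the
# no-answer threshold, and write or print the evaluation dict.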
def lowerCamelCase_ ( ):
with open(OPTS.data_file ) as f:
lowerCamelCase__ : Optional[int] = json.load(a__ )
lowerCamelCase__ : Any = dataset_json['data']
with open(OPTS.pred_file ) as f:
lowerCamelCase__ : List[Any] = json.load(a__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
lowerCamelCase__ : str = json.load(a__ )
else:
lowerCamelCase__ : Optional[int] = {k: 0.0 for k in preds}
lowerCamelCase__ : List[str] = make_qid_to_has_ans(a__ ) # maps qid to True/False
lowerCamelCase__ : int = [k for k, v in qid_to_has_ans.items() if v]
lowerCamelCase__ : Optional[Any] = [k for k, v in qid_to_has_ans.items() if not v]
lowerCamelCase__ , lowerCamelCase__ : Tuple = get_raw_scores(a__ , a__ )
lowerCamelCase__ : Tuple = apply_no_ans_threshold(a__ , a__ , a__ , OPTS.na_prob_thresh )
lowerCamelCase__ : Union[str, Any] = apply_no_ans_threshold(a__ , a__ , a__ , OPTS.na_prob_thresh )
lowerCamelCase__ : Tuple = make_eval_dict(a__ , a__ )
if has_ans_qids:
lowerCamelCase__ : Optional[Any] = make_eval_dict(a__ , a__ , qid_list=a__ )
merge_eval(a__ , a__ , 'HasAns' )
if no_ans_qids:
lowerCamelCase__ : Any = make_eval_dict(a__ , a__ , qid_list=a__ )
merge_eval(a__ , a__ , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(a__ , a__ , a__ , a__ , a__ , a__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(a__ , a__ , a__ , a__ , a__ , OPTS.out_image_dir )
histogram_na_prob(a__ , a__ , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(a__ , a__ , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(a__ , a__ )
else:
print(json.dumps(a__ , indent=2 ) )
if __name__ == "__main__":
A_ : Optional[Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 716 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
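    # Re-apply the fixed conditioning values (e.g. the current state) onto the trajectory.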
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
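    # Classifier-guided reverse diffusion: at every timestep, nudge the sample along the
    # value-function gradient before taking the scheduler's denoising step.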
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
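    # Plan a batch of trajectories from the current observation and return the first
    # action of the highest-value (or, without value guidance, a random) trajectory.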
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
    # exchange sort: repeatedly swap any out-of-order pair until the list is sorted
    lowerCamelCase__ : List[Any] = len(_lowerCamelCase )
    for i in range(lowerCamelCase__ ):
        for j in range(i + 1 , lowerCamelCase__ ):
            if _lowerCamelCase[j] < _lowerCamelCase[i]:
                _lowerCamelCase[i] , _lowerCamelCase[j] = _lowerCamelCase[j] , _lowerCamelCase[i]
    return _lowerCamelCase
if __name__ == "__main__":
A_ : Optional[Any] = input("Enter numbers separated by a comma:\n").strip()
A_ : Tuple = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
# one length string
lowerCamelCase__ : str = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
            lowerCamelCase__ : Union[str, Any] = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            lowerCamelCase__ : int = cha + chb
            if sequence in two_char_strings:
                lowerCamelCase__ : int = two_char_strings[sequence]
                lowerCamelCase__ : Tuple = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
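# Count single characters and adjacent character pairs in the text; the last
# character and a leading space are folded in so every position is covered.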
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
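# Tester that builds tiny OpenAI GPT configs/inputs and checks each model head's output shapes.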
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : Dict = batch_size
lowerCamelCase__ : List[str] = seq_length
lowerCamelCase__ : Dict = is_training
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Tuple = intermediate_size
lowerCamelCase__ : Dict = hidden_act
lowerCamelCase__ : List[str] = hidden_dropout_prob
lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : List[str] = type_vocab_size
lowerCamelCase__ : Any = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Optional[Any] = self.vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : Tuple = OpenAIGPTConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
lowerCamelCase__ : List[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, *lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = OpenAIGPTModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(UpperCAmelCase__, token_type_ids=UpperCAmelCase__, head_mask=UpperCAmelCase__ )
lowerCamelCase__ : int = model(UpperCAmelCase__, token_type_ids=UpperCAmelCase__ )
lowerCamelCase__ : Optional[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, *lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = OpenAIGPTLMHeadModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase__ : Any = model(UpperCAmelCase__, token_type_ids=UpperCAmelCase__, labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, *lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = OpenAIGPTDoubleHeadsModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase__ : int = model(UpperCAmelCase__, token_type_ids=UpperCAmelCase__, labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, *lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.num_labels
lowerCamelCase__ : List[str] = OpenAIGPTForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(UpperCAmelCase__, token_type_ids=UpperCAmelCase__, labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.prepare_config_and_inputs()
(
lowerCamelCase__
) : List[Any] = config_and_inputs
lowerCamelCase__ : Tuple = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCamelCase__ : List[str] = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : str = super()._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__, return_labels=UpperCAmelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=UpperCAmelCase__, )
lowerCamelCase__ : Dict = inputs_dict['''labels''']
lowerCamelCase__ : Optional[int] = inputs_dict['''labels''']
lowerCamelCase__ : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=UpperCAmelCase__, )
lowerCamelCase__ : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=UpperCAmelCase__ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = OpenAIGPTModelTester(self )
lowerCamelCase__ : int = ConfigTester(self, config_class=UpperCAmelCase__, n_embd=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*UpperCAmelCase__ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase__ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*UpperCAmelCase__ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCAmelCase__ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(UpperCAmelCase__ )
lowerCamelCase__ : Optional[Any] = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]], dtype=torch.long, device=UpperCAmelCase__ ) # the president is
lowerCamelCase__ : str = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase__ : Tuple = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__ )
self.assertListEqual(output_ids[0].tolist(), UpperCAmelCase__ )
| 718 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
    for i, name in enumerate(names ):
for letter in name:
            name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
if __name__ == "__main__":
print(solution())
| 696 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
A_ : str = logging.get_logger(__name__)
A_ : Any = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
A_ : Optional[int] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
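# Parse a label file into an {index: label} dict (line number -> first token on the line).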
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = {}
with open(_lowerCamelCase , 'r' ) as file:
for line_number, line in enumerate(_lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = line.strip()
if line:
lowerCamelCase__ : Tuple = line.split()
lowerCamelCase__ : Optional[int] = line_number
lowerCamelCase__ : Optional[Any] = words[0]
lowerCamelCase__ : Optional[int] = value
return result
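# Walk the attribute path in `key`, validate tensor shapes, and copy `value` into the HF model.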
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
for attribute in key.split('.' ):
lowerCamelCase__ : int = getattr(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = PARAM_MAPPING[full_name.split('.' )[-1]]
lowerCamelCase__ : List[Any] = "param"
if weight_type is not None and weight_type != "param":
lowerCamelCase__ : Union[str, Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
lowerCamelCase__ : Dict = hf_pointer
for attribute in hf_param_name.split('.' ):
lowerCamelCase__ : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : List[str] = shape_pointer.shape
# let's reduce dimension
lowerCamelCase__ : int = value[0]
else:
lowerCamelCase__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCamelCase__ : int = value
elif weight_type == "weight_g":
lowerCamelCase__ : Any = value
elif weight_type == "weight_v":
lowerCamelCase__ : List[str] = value
elif weight_type == "bias":
lowerCamelCase__ : str = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
lowerCamelCase__ : List[str] = getattr(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = value
else:
lowerCamelCase__ : str = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = PARAM_MAPPING[full_name.split('.' )[-1]]
lowerCamelCase__ : Dict = "param"
if weight_type is not None and weight_type != "param":
lowerCamelCase__ : Optional[int] = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowerCamelCase__ : List[str] = ".".join([key, hf_param_name] )
else:
lowerCamelCase__ : Union[str, Any] = key
lowerCamelCase__ : Union[str, Any] = value if "lm_head" in full_key else value[0]
A_ : int = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
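# Map one fairseq parameter onto the HF model (or into hf_dict); returns whether it was used.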
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
lowerCamelCase__ : Optional[Any] = False
for key, mapped_key in MAPPING.items():
lowerCamelCase__ : List[Any] = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCamelCase__ : Any = True
if "*" in mapped_key:
lowerCamelCase__ : Optional[Any] = name.split(_lowerCamelCase )[0].split('.' )[-2]
lowerCamelCase__ : str = mapped_key.replace('*' , _lowerCamelCase )
if "weight_g" in name:
lowerCamelCase__ : Optional[int] = "weight_g"
elif "weight_v" in name:
lowerCamelCase__ : Optional[int] = "weight_v"
elif "bias" in name:
lowerCamelCase__ : List[str] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase__ : Tuple = "weight"
else:
lowerCamelCase__ : str = None
if hf_dict is not None:
rename_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return is_used
return is_used
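# Iterate over the fairseq state dict and load every tensor into the HF model.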
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Any = fairseq_model.state_dict()
lowerCamelCase__ : int = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__ : List[str] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == 'group' , )
lowerCamelCase__ : int = True
else:
lowerCamelCase__ : Optional[int] = load_wavaveca_layer(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
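# Load one convolutional feature-extractor layer, validating tensor shapes as we go.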
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = full_name.split('conv_layers.' )[-1]
lowerCamelCase__ : Any = name.split('.' )
lowerCamelCase__ : List[Any] = int(items[0] )
lowerCamelCase__ : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowerCamelCase__ : Tuple = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowerCamelCase__ : int = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowerCamelCase__ : Optional[int] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowerCamelCase__ : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
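# Convert a fairseq wav2vec2 checkpoint (pretrained, fine-tuned CTC, or sequence
# classification) into a Hugging Face checkpoint directory.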
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=False ):
if config_path is not None:
lowerCamelCase__ : Any = WavaVecaConfig.from_pretrained(_lowerCamelCase )
else:
lowerCamelCase__ : List[str] = WavaVecaConfig()
if is_seq_class:
lowerCamelCase__ : List[Any] = read_txt_into_dict(_lowerCamelCase )
lowerCamelCase__ : int = idalabel
lowerCamelCase__ : Any = WavaVecaForSequenceClassification(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
feature_extractor.save_pretrained(_lowerCamelCase )
elif is_finetuned:
if dict_path:
lowerCamelCase__ : List[str] = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase__ : Tuple = target_dict.pad_index
lowerCamelCase__ : str = target_dict.bos_index
lowerCamelCase__ : int = target_dict.eos_index
lowerCamelCase__ : Any = len(target_dict.symbols )
lowerCamelCase__ : int = os.path.join(_lowerCamelCase , 'vocab.json' )
if not os.path.isdir(_lowerCamelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
lowerCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : Dict = 1
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_lowerCamelCase , )
lowerCamelCase__ : List[str] = True if config.feat_extract_norm == "layer" else False
lowerCamelCase__ : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
lowerCamelCase__ : str = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
lowerCamelCase__ : int = WavaVecaForCTC(_lowerCamelCase )
else:
lowerCamelCase__ : Union[str, Any] = WavaVecaForPreTraining(_lowerCamelCase )
if is_finetuned or is_seq_class:
lowerCamelCase__ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
lowerCamelCase__ : List[Any] = argparse.Namespace(task='audio_pretraining' )
lowerCamelCase__ : Optional[int] = fairseq.tasks.setup_task(_lowerCamelCase )
lowerCamelCase__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
lowerCamelCase__ : Optional[int] = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
A_ : List[str] = parser.parse_args()
A_ : List[str] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 719 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
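# Minimal usage sketch (assumption: this is the un-obfuscated Speech2TextProcessor API;
# the checkpoint name below is only an illustrative choice):
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   batch = processor(audio=waveform, sampling_rate=16_000, text="a transcript", return_tensors="pt")
#   # `batch` holds the feature-extractor inputs plus the tokenized text ids from the tokenizer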
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
A_ : List[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
A_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
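# Helper that builds a torch.long tensor directly on the test device.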
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
        _lowerCamelCase , dtype=torch.long , device=torch_device , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : Union[str, Any] = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
A_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
    # split on the end-of-function markers defined in A_ at the top of this file
    lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(A_ ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
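            # `gather` requires identically shaped tensors on every process, so the
            # generations are first right-padded to a common length with the pad token.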
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
    # note: args.batch_size is actually num_return_sequences per prompt; the DataLoader batch size is fixed at 1
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Tuple = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class a_ ( lowercase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = 'xlm-roberta-xl'
def __init__(self, lowerCamelCase_=2_5_0_8_8_0, lowerCamelCase_=2_5_6_0, lowerCamelCase_=3_6, lowerCamelCase_=3_2, lowerCamelCase_=1_0_2_4_0, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_4, lowerCamelCase_=1, lowerCamelCase_=0.02, lowerCamelCase_=1e-05, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=2, lowerCamelCase_="absolute", lowerCamelCase_=True, lowerCamelCase_=None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__, bos_token_id=UpperCAmelCase__, eos_token_id=UpperCAmelCase__, **UpperCAmelCase__ )
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : str = num_attention_heads
lowerCamelCase__ : Optional[int] = hidden_act
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Optional[Any] = initializer_range
lowerCamelCase__ : Optional[Any] = layer_norm_eps
lowerCamelCase__ : Dict = position_embedding_type
lowerCamelCase__ : int = use_cache
lowerCamelCase__ : Optional[int] = classifier_dropout
class a_ ( lowercase__ ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase__ : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
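        # For any task other than multiple-choice, this property resolves to:
        #   OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
        #                ('attention_mask', {0: 'batch', 1: 'sequence'})])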
| 700 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Tuple = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
A_ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
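# Example: 10! = 3628800, and split_and_add(3628800) = 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) returns 27, matching the Project Euler #20 statement.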
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
A_ : Any = None
A_ : int = logging.get_logger(__name__)
A_ : Dict = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A_ : Union[str, Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
A_ : Any = {
'google/bigbird-roberta-base': 40_96,
'google/bigbird-roberta-large': 40_96,
'google/bigbird-base-trivia-itc': 40_96,
}
A_ : Union[str, Any] = '▁'
class a_ ( lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = VOCAB_FILES_NAMES
lowerCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Optional[int] = BigBirdTokenizer
lowerCamelCase__ : int = ['input_ids', 'attention_mask']
lowerCamelCase__ : Union[str, Any] = []
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_="<unk>", lowerCamelCase_="<s>", lowerCamelCase_="</s>", lowerCamelCase_="<pad>", lowerCamelCase_="[SEP]", lowerCamelCase_="[MASK]", lowerCamelCase_="[CLS]", **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase, __lowerCamelCase ) else bos_token
lowerCamelCase__ : Any = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase, __lowerCamelCase ) else eos_token
lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase, __lowerCamelCase ) else unk_token
lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase, __lowerCamelCase ) else pad_token
lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase, __lowerCamelCase ) else cls_token
lowerCamelCase__ : str = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase, __lowerCamelCase ) else sep_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase, __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase, tokenizer_file=__lowerCamelCase, bos_token=__lowerCamelCase, eos_token=__lowerCamelCase, unk_token=__lowerCamelCase, sep_token=__lowerCamelCase, pad_token=__lowerCamelCase, cls_token=__lowerCamelCase, mask_token=__lowerCamelCase, **__lowerCamelCase, )
lowerCamelCase__ : List[Any] = vocab_file
lowerCamelCase__ : List[Any] = False if not self.vocab_file else True
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Any = [self.sep_token_id]
lowerCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
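    # Resulting formats (standard BERT-style pair encoding):
    #   single sequence:   [CLS] X [SEP]
    #   pair of sequences: [CLS] A [SEP] B [SEP]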
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [self.sep_token_id]
lowerCamelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : Dict = os.path.join(
__lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file, __lowerCamelCase )
return (out_vocab_file,)
| 702 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
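        # The Perceiver tokenizer is byte-level: a handful of low ids are reserved for
        # special tokens ([CLS] = 4 and [SEP] = 5 here, as the assertions below show) and
        # every UTF-8 byte b maps to id b + 6, e.g. 'U' (0x55 = 85) -> 91, and the three
        # UTF-8 bytes of '€' (0xE2 0x82 0xAC) become 232, 136, 178.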
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase = 10 ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or n < 0:
raise ValueError('Invalid input' )
lowerCamelCase__ : str = 10**n
lowerCamelCase__ : Tuple = 2_8433 * (pow(2 , 783_0457 , UpperCamelCase__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(10) = }")
| 703 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
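# Total cone area = base + lateral surface = pi*r^2 + pi*r*l, where the slant
# height l = sqrt(h^2 + r^2); the expression above simply factors out pi*r.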
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
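# Example (Heron's formula): a 3-4-5 right triangle has semi-perimeter s = 6 and
# area = sqrt(6 * 3 * 2 * 1) = 6.0.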
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
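# Derivation: the apothem of a regular n-gon with side s is s / (2 * tan(pi / n)),
# so area = 1/2 * perimeter * apothem = (n * s^2) / (4 * tan(pi / n)).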
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A_ : str = TypeVar("T")
A_ : Any = TypeVar("U")
class a_ ( Generic[T, U] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = key
lowerCamelCase__ : List[Any] = val
lowerCamelCase__ : DoubleLinkedListNode[T, U] | None = None
lowerCamelCase__ : DoubleLinkedListNode[T, U] | None = None
def __repr__(self ):
'''simple docstring'''
return (
f'''Node: key: {self.key}, val: {self.val}, '''
f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class a_ ( Generic[T, U] ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
lowerCamelCase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase_, lowercase_ )
lowerCamelCase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase_, lowercase_ )
        self.head.next, self.rear.prev = self.rear, self.head  # link the two sentinel nodes
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = ["DoubleLinkedList"]
lowerCamelCase__ : Dict = self.head
while node.next is not None:
rep.append(str(lowercase_ ) )
lowerCamelCase__ : List[str] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowercase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
lowerCamelCase__ : Tuple = node
lowerCamelCase__ : str = previous
lowerCamelCase__ : Optional[Any] = node
lowerCamelCase__ : Any = self.rear
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
lowerCamelCase__ : Union[str, Any] = node.next
lowerCamelCase__ : Optional[int] = node.prev
lowerCamelCase__ : str = None
lowerCamelCase__ : int = None
return node
class a_ ( Generic[T, U] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = {}
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : DoubleLinkedList[T, U] = DoubleLinkedList()
lowerCamelCase__ : List[str] = capacity
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__(self ):
'''simple docstring'''
return (
f'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
f'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__(self, lowerCamelCase_ ):
'''simple docstring'''
return key in self.cache
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
lowerCamelCase__ : DoubleLinkedListNode[T, U] = self.cache[key]
lowerCamelCase__ : Optional[Any] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase_ )
return node.val
self.miss += 1
return None
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
lowerCamelCase__ : Optional[Any] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowercase_ ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
lowerCamelCase__ : Dict = DoubleLinkedListNode(lowercase_, lowercase_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
lowerCamelCase__ : List[Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
lowerCamelCase__ : Optional[Any] = value
self.list.add(lowercase_ )
@classmethod
def a__ (cls, lowerCamelCase_ = 1_2_8 ):
'''simple docstring'''
def cache_decorator_inner(lowerCamelCase_ ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase_ ) -> U:
if func not in cls.decorator_function_to_instance_map:
lowerCamelCase__ : List[str] = LRUCache(lowercase_ )
lowerCamelCase__ : Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
lowerCamelCase__ : Any = func(*lowercase_ )
cls.decorator_function_to_instance_map[func].put(args[0], lowercase_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase_, 'cache_info', lowercase_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
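    # Illustrative usage sketch (hypothetical names: upstream this cache class is
    # exported as `LRUCache` with the classmethod named `decorator`):
    #
    #     @LRUCache.decorator(100)
    #     def fib(num: int) -> int:
    #         return num if num < 2 else fib(num - 1) + fib(num - 2)
    #
    #     fib(100)          # results are memoised transparently
    #     fib.cache_info()  # CacheInfo(hits=..., misses=..., capacity=100, ...)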
| 704 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append the new tokens to input_ids and the new mask to the attention mask
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
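        # Rough intuition for the two strategies exercised above (a sketch, not the
        # exact implementation): with scaling factor f,
        #   linear : every position i is rotated as if it were i / f, so even
        #            short inputs change;
        #   dynamic: the rotary base is only enlarged once the input grows past
        #            max_position_embeddings, so short inputs stay bit-identical --
        #            exactly the distinction the assertions above encode.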
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
            # The hub repo was updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[Any] = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : int = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__ : Tuple = 192
lowerCamelCase__ : Optional[Any] = 768
lowerCamelCase__ : Optional[Any] = 12
lowerCamelCase__ : int = 3
lowerCamelCase__ : Tuple = [800, 1333]
lowerCamelCase__ : Union[str, Any] = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__ : Tuple = 330
lowerCamelCase__ : str = 14
lowerCamelCase__ : int = 6
lowerCamelCase__ : Optional[int] = 1320
elif "yolos_s" in yolos_name:
lowerCamelCase__ : Union[str, Any] = 384
lowerCamelCase__ : Optional[int] = 1536
lowerCamelCase__ : str = 12
lowerCamelCase__ : Tuple = 6
elif "yolos_b" in yolos_name:
lowerCamelCase__ : Union[str, Any] = [800, 1344]
lowerCamelCase__ : List[Any] = 91
lowerCamelCase__ : Union[str, Any] = 'huggingface/label-files'
lowerCamelCase__ : List[Any] = 'coco-detection-id2label.json'
lowerCamelCase__ : str = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : List[str] = idalabel
lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ : str = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase__ : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowerCamelCase__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ : Dict = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__ : Union[str, Any] = in_proj_bias[-config.hidden_size :]
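# Layout assumed above: timm stores the query/key/value projections stacked along
# dim 0 of one (3 * hidden_size, hidden_size) matrix, so rows [0:h), [h:2h) and
# [2h:3h) are split into the query, key and value weights respectively.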
def rename_key(name):
    if "backbone" in name:
        name = name.replace('backbone', 'vit')
    if "cls_token" in name:
        name = name.replace('cls_token', 'embeddings.cls_token')
    if "det_token" in name:
        name = name.replace('det_token', 'embeddings.detection_tokens')
    if "mid_pos_embed" in name:
        name = name.replace('mid_pos_embed', 'encoder.mid_position_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "blocks" in name:
        name = name.replace('blocks', 'encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "class_embed" in name:
        name = name.replace('class_embed', 'class_labels_classifier')
    if "bbox_embed" in name:
        name = name.replace('bbox_embed', 'bbox_predictor')
    if "vit.norm" in name:
        name = name.replace('vit.norm', 'vit.layernorm')
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format='coco_detection', size=size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == 'yolos_ti':
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == 'yolos_s_200_pre':
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == 'yolos_s_300_pre':
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == 'yolos_s_dWr':
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == 'yolos_base':
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f'Unknown yolos_name: {yolos_name}')

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            'yolos_ti': 'yolos-tiny',
            'yolos_s_200_pre': 'yolos-small',
            'yolos_s_300_pre': 'yolos-small-300',
            'yolos_s_dWr': 'yolos-small-dwr',
            'yolos_base': 'yolos-base',
        }

        print('Pushing to the hub...')
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization='hustvl')
        model.push_to_hub(model_name, organization='hustvl')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
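# A minimal usage sketch for the conversion script above; the script filename and the
# local .pth checkpoint path are assumptions, not part of the original source:
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small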
| 705 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
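# Quick illustration of what the pattern extracts (both capture groups are returned):
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]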
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 696 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    '''simple docstring'''

    def _create_example_records(self):
        '''simple docstring'''
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        '''simple docstring'''
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        '''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ['col_1', 'col_2'])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        '''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        '''simple docstring'''
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {'col_1': 1})
        self.assertDictEqual(dset[1], {'col_1': None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        '''simple docstring'''
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features['col_1'], Sequence(Value('int64')))

    def test_create_empty(self):
        '''simple docstring'''
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
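# The behaviour asserted above, as a standalone sketch:
#   from datasets import Dataset
#   dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
#   dset[1]  # -> {"col_1": None}: the first record fixes the schema, later new columns are dropped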
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
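# With the lazy module installed in sys.modules, a statement such as
#   from transformers.models.llama import LlamaTokenizer
# only imports the tokenizer submodule on first attribute access, keeping the
# top-level import cheap when the torch/sentencepiece extras are unused.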
| 696 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    '''simple docstring'''

    def test_cached_files_are_used_when_internet_is_down(self):
        '''simple docstring'''
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        '''simple docstring'''
        _ = ViTImageProcessor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )

    def test_image_processor_from_pretrained_subfolder(self):
        '''simple docstring'''
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')

        config = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    '''simple docstring'''

    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        '''simple docstring'''
        try:
            delete_repo(token=cls._token, repo_id='test-image-processor')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-image-processor')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        '''simple docstring'''
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('test-image-processor', use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-image-processor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='test-image-processor', push_to_hub=True, use_auth_token=self._token )

        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self):
        '''simple docstring'''
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-image-processor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-image-processor-org', push_to_hub=True, use_auth_token=self._token )

        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        '''simple docstring'''
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor', trust_remote_code=True )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor')
| 707 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
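# Usage sketch (the script filename is an assumption):
#   python google_search.py "hello world"
# opens the top five result links from the Google results page in the default browser.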
| 696 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function='relu', d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype='float32', router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy='all', normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ['float32', 'float16', 'bfloat16']:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
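# A minimal sketch of constructing a toy configuration for local experiments
# (the values below are illustrative and far smaller than the released 54B model):
#   config = NllbMoeConfig(d_model=256, encoder_layers=2, decoder_layers=2, num_experts=4, expert_capacity=8)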
| 708 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    '''simple docstring'''

    def test_download_only_flax_weights(self):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None, cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], 'snapshots'))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def test_stable_diffusion_tiny(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None )
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=None )
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None )
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16 )
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        '''simple docstring'''
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=False, steps_offset=1, )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None, )
        scheduler_state = scheduler.create_state()

        params['scheduler'] = scheduler_state

        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        '''simple docstring'''
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
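# The pmap data layout used throughout these tests, as a standalone sketch:
# `replicate` copies the params pytree once per device, while `shard` reshapes a batch of
# size (num_devices * n, ...) to (num_devices, n, ...), so under `jit=True` (pmap) each
# device receives its own slice:
#   params = replicate(params)        # params pytree -> per-device copies
#   prompt_ids = shard(prompt_ids)    # (8*n, seq) -> (8, n, seq) on an 8-device host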
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( DiffusionPipeline ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
from manim import *
class Stage1(Scene):
    '''simple docstring'''

    def construct(self):
        '''simple docstring'''
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('CPU', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('GPU', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('Model', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu, run_time=1), Create(gpu, run_time=1), Create(model, run_time=1), )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24, )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(key)

        first_animations = []
        second_animations = []
        cpu_targs = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
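# Render sketch (the file and scene names are assumptions):
#   manim -pql stage_1.py Stage1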
| 710 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    '''simple docstring'''

    def forward(self, input_ids, token_type_ids, attention_mask):
        '''simple docstring'''
        return None


class FuncNonContiguousArgs:
    '''simple docstring'''

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        '''simple docstring'''
        return None
class OnnxExportTestCase(unittest.TestCase):
    '''simple docstring'''

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ('bert-base-cased', {}),
        ('gpt2', {'use_cache': False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
    def test_export_tensorflow(self):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'tf', 12, **model_kwargs)
@require_torch
@slow
    def test_export_pytorch(self):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'pt', 12, **model_kwargs)
@require_torch
@slow
    def test_export_custom_bert_model(self):
        '''simple docstring'''
        from transformers import BertModel

        vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t') as vocab_file:
            vocab_file.write('\n'.join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, 'pt', 12, tokenizer)
@require_tf
@slow
    def test_quantize_tf(self):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'tf', 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model')
@require_torch
@slow
    def test_quantize_pytorch(self):
        '''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'pt', 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model')
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        '''simple docstring'''
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath('model.onnx')

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)
            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        '''simple docstring'''
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random'))
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random')
        self._test_infer_dynamic_axis(model, tokenizer, 'pt')

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        '''simple docstring'''
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random'))
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random')
        self._test_infer_dynamic_axis(model, tokenizer, 'tf')
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        '''simple docstring'''
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ['input_ids', 'token_type_ids', 'attention_mask']:
            self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'})
        self.assertDictEqual(shapes['output_1'], {0: 'batch'})
    def test_ensure_valid_input(self):
        '''simple docstring'''
        input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens['input_ids'])
        self.assertEqual(ordered_input_names[0], 'input_ids')
    def test_generate_identified_filename(self):
        '''simple docstring'''
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx'), '-test')
        self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix())
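# Usage sketch of the converter exercised above (argument order: framework, model,
# output path, opset; the output directory must not already contain files):
#   from pathlib import Path
#   from transformers.convert_graph_to_onnx import convert
#   convert('pt', 'bert-base-cased', Path('onnx/bert-base-cased.onnx'), 12)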
| 696 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : int = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}
class DPRConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'dpr'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', projection_dim=0, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
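# Sketch: a non-zero `projection_dim` adds a projection on top of the encoder's pooled
# output, e.g. DPRConfig(projection_dim=128) for 128-dimensional question/passage embeddings.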
| 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim(self):
        '''simple docstring'''
        return 32

    @property
    def block_out_channels_0(self):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        '''simple docstring'''
        return 100
    @property
    def dummy_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)

        model_kwargs = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        '''simple docstring'''
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device )

        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        '''simple docstring'''
        device = 'cpu'

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        '''simple docstring'''
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png' )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = 'A robot, 4k photo'

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu').manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt='', ).to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type='np', )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 696 | 0 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
A_ : List[Any] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
A_ : Any = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
A_ : List[Any] = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
A_ : List[Any] = sorted(arg_to_scheduler.keys())
A_ : List[Any] = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class a_ ( pl.LightningModule ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_="base", lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams )
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : str = Path(self.hparams.output_dir )
lowerCamelCase__ : List[str] = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
            lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({'num_labels': num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, )
else:
lowerCamelCase__ : Optional[Any] = config
lowerCamelCase__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
            if getattr(self.hparams, p, None ):
                assert hasattr(self.config, p ), f'''model config doesn\'t have a `{p}` attribute'''
                setattr(self.config, p, getattr(self.hparams, p ) )
if tokenizer is None:
            lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, )
else:
lowerCamelCase__ : List[Any] = tokenizer
lowerCamelCase__ : Dict = MODEL_MODES[mode]
if model is None:
            lowerCamelCase__ : List[str] = self.model_type.from_pretrained(
                self.hparams.model_name_or_path, from_tf=bool('.ckpt' in self.hparams.model_name_or_path ), config=self.config, cache_dir=cache_dir, )
else:
lowerCamelCase__ : Tuple = model
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
        lowerCamelCase__ : int = self.model_type.from_pretrained(*args, **kwargs )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = arg_to_scheduler[self.hparams.lr_scheduler]
lowerCamelCase__ : Dict = get_schedule_func(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() )
lowerCamelCase__ : Dict = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model
lowerCamelCase__ : Union[str, Any] = ['bias', 'LayerNorm.weight']
lowerCamelCase__ : Dict = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
            lowerCamelCase__ : Optional[Any] = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False )
        else:
            lowerCamelCase__ : Optional[Any] = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon )
lowerCamelCase__ : Tuple = optimizer
lowerCamelCase__ : Optional[int] = self.get_lr_scheduler()
return [optimizer], [scheduler]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
        return self.validation_step(batch, batch_nb )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
        return self.validation_end(outputs )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = max(1, self.hparams.gpus ) # TODO: consider num_tpu_cores
lowerCamelCase__ : Any = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if stage == "test":
lowerCamelCase__ : Optional[Any] = len(self.test_dataloader().dataset )
else:
            lowerCamelCase__ : Optional[int] = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True )
lowerCamelCase__ : Optional[int] = len(self.train_dataloader().dataset )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = False ):
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def a__ (self ):
'''simple docstring'''
return self.train_loader
def a__ (self ):
'''simple docstring'''
        return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False )
def a__ (self ):
'''simple docstring'''
        return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir, 'cached_{}_{}_{}'.format(
                mode, list(filter(None, self.hparams.model_name_or_path.split('/' ) ) ).pop(), str(self.hparams.max_seq_length ), ), )
@pl.utilities.rank_zero_only
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.output_dir.joinpath('best_tfmr' )
lowerCamelCase__ : List[Any] = self.step_count
        self.model.save_pretrained(save_path )
        self.tokenizer.save_pretrained(save_path )
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
        parser.add_argument(
            '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models', )
        parser.add_argument(
            '--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name' )
        parser.add_argument(
            '--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name', )
        parser.add_argument(
            '--cache_dir', default=str(Path(__file__ ).parent / 'test_run' / 'cache' ), type=str, help='Where do you want to store the pre-trained models downloaded from huggingface.co', )
        parser.add_argument(
            '--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--dropout', type=float, help='Dropout probability (Optional). Goes into model.config', )
        parser.add_argument(
            '--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config', )
        parser.add_argument('--learning_rate', default=5e-5, type=float, help='The initial learning rate for Adam.' )
        parser.add_argument(
            '--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler', )
        parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.' )
        parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.' )
        parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.' )
        parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader' )
        parser.add_argument('--num_train_epochs', dest='max_epochs', default=3, type=int )
        parser.add_argument('--train_batch_size', default=3_2, type=int )
        parser.add_argument('--eval_batch_size', default=3_2, type=int )
parser.add_argument('--adafactor', action='store_true' )
class a_ ( pl.Callback ):
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class a_ ( pl.Callback ):
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
                print(name )
class a_ ( pl.Callback ):
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = trainer.lr_schedulers[0]['scheduler']
lowerCamelCase__ : Optional[int] = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
lowerCamelCase__ : List[str] = trainer.callback_metrics
# Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('{} = {}\n'.format(key, str(metrics[key] ) ) )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
rank_zero_info('***** Test results *****' )
lowerCamelCase__ : List[Any] = trainer.callback_metrics
# Log and save results to file
lowerCamelCase__ : List[Any] = os.path.join(pl_module.hparams.output_dir, 'test_results.txt' )
        with open(output_test_results_file, 'w' ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('{} = {}\n'.format(key, str(metrics[key] ) ) )
                    writer.write('{} = {}\n'.format(key, str(metrics[key] ) ) )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
parser.add_argument(
        '--output_dir' , default=str(Path(__file__ ).parent / 'test_run' / 'model_checkpoints' ) , type=str , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
        '--fp16_opt_level' , type=str , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
    parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=int )
    parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=float , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
    parser.add_argument(
        '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=int , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--seed' , type=int , default=42 , help='random seed for initialization' )
    parser.add_argument(
        '--data_dir' , default=str(Path(__file__ ).parent / 'test_run' / 'dummy-train-data' ) , type=str , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[] , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase , ):
pl.seed_everything(args.seed )
# init model
lowerCamelCase__ : Tuple = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
# add custom checkpoints
if checkpoint_callback is None:
lowerCamelCase__ : int = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
if logging_callback is None:
lowerCamelCase__ : List[Any] = LoggingCallback()
lowerCamelCase__ : List[str] = {}
    if args.fp16:
lowerCamelCase__ : List[str] = 16
if args.gpus > 1:
lowerCamelCase__ : Dict = 'auto'
lowerCamelCase__ : Any = 'ddp'
lowerCamelCase__ : List[Any] = args.accumulate_grad_batches
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : Any = 'auto'
lowerCamelCase__ : Any = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
else:
print('RAG modeling tests with new set functions successfuly executed!' )
return trainer
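# A minimal sketch of how a task script is expected to wire this module
# together, assuming the helpers keep their original names add_generic_args /
# generic_train (they are defined above under mangled identifiers); the
# `MyTaskModule` subclass is an assumption for illustration:
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   parser = MyTaskModule.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyTaskModule(args)  # subclass implementing get_dataloader() and the *_step hooks
#   trainer = generic_train(model, args)
#   if args.do_predict:
#       trainer.test(model)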
| 712 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( start , finish ):
    lowerCamelCase__ : Dict = len(finish )
    print('The following activities are selected:' )
    # The first activity is always selected
    lowerCamelCase__ : Optional[int] = 0
    print(i , end=',' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of the previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=',' )
            lowerCamelCase__ : Optional[Any] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : List[str] = [1, 3, 0, 5, 8, 5]
A_ : Optional[int] = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
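# With the inputs above (the lists must already be sorted by finish time for
# the greedy step to be valid) the selected activity indices are 0, 1, 3, 4:
# activity 0 is always taken, then 1 (start 3 >= finish 2), 3 (5 >= 4) and
# 4 (8 >= 7), so the intended console output is: 0,1,3,4,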
| 713 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
if group not in primes:
raise ValueError('Unsupported Group' )
lowerCamelCase__ : int = primes[group]['prime']
lowerCamelCase__ : Optional[int] = primes[group]['generator']
lowerCamelCase__ : Any = int(hexlify(urandom(3_2 ) ), base=1_6 )
def a__ (self ):
'''simple docstring'''
return hex(self.__private_key )[2:]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = pow(self.generator, self.__private_key, self.prime )
return hex(lowerCamelCase_ )[2:]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase_, (self.prime - 1) // 2, self.prime ) == 1
)
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = int(lowerCamelCase_, base=1_6 )
if not self.is_valid_public_key(lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Tuple = pow(lowerCamelCase_, self.__private_key, self.prime )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase_, (prime - 1) // 2, lowerCamelCase_ ) == 1
)
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
lowerCamelCase__ : Dict = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[Any] = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[str] = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase_, lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Dict = pow(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
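# A minimal sketch of the intended two-party exchange. The class name
# DiffieHellman is referenced in the static helper above; the method names
# generate_public_key / generate_shared_key are the original descriptive names
# and are an assumption here, since the class exposes them under mangled
# identifiers:
#
#   alice = DiffieHellman(group=14)
#   bob = DiffieHellman(group=14)
#   alice_key = alice.generate_shared_key(bob.generate_public_key())
#   bob_key = bob.generate_shared_key(alice.generate_public_key())
#   assert alice_key == bob_key  # both sides derive the same hashed shared secret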
| 696 | 0 |
"""simple docstring"""
class a_ :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = {} # Mapping from char to TrieNode
lowerCamelCase__ : int = False
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
for word in words:
            self.insert(word )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self
for char in word:
if char not in curr.nodes:
lowerCamelCase__ : Any = TrieNode()
lowerCamelCase__ : int = curr.nodes[char]
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self
for char in word:
if char not in curr.nodes:
return False
lowerCamelCase__ : Optional[Any] = curr.nodes[char]
return curr.is_leaf
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
def _delete(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ) -> bool:
            if index == len(word ):
# If word does not exist
if not curr.is_leaf:
return False
lowerCamelCase__ : Dict = False
return len(curr.nodes ) == 0
lowerCamelCase__ : Union[str, Any] = word[index]
            lowerCamelCase__ : str = curr.nodes.get(char )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
            lowerCamelCase__ : Any = _delete(char_node, word, index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
        _delete(self, word, 0 )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if node.is_leaf:
        print(word , end=' ' )
for key, value in node.nodes.items():
        print_words(value , word + key )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = 'banana bananas bandana band apple all beast'.split()
lowerCamelCase__ : Any = TrieNode()
    root.insert_many(words )
# print_words(root, "")
    assert all(root.find(word ) for word in words )
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
    print(str(msg ) , 'works!' if passes else 'doesn\'t work :(' )
def lowerCamelCase_ ( ):
assert test_trie()
def lowerCamelCase_ ( ):
print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
main()
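# Each trie operation above walks one node per character, so insert, find and
# delete all run in O(len(word)) time. A small sketch of the prefix sharing
# this buys (TrieNode is the class above):
#
#   t = TrieNode()
#   t.insert_many(['car', 'cart'])
#   # 'car' and 'cart' share the c -> a -> r chain; only one extra node ('t')
#   # is created for 'cart', and the 'r' node is flagged as a leaf for 'car'.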
| 714 |
"""simple docstring"""
def lowerCamelCase_ ( mass , velocity ):
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
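# Worked example of KE = (1/2) * m * v**2: a 10 kg body moving at 5 m/s carries
# 0.5 * 10 * 5 * 5 = 125.0 J, and because both velocity factors go through
# abs(), lowerCamelCase_(10, -5) returns the same 125.0.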
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( input_a , input_b ):
    return 1 if input_a == input_b else 0
def lowerCamelCase_ ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
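# XNOR is the complement of XOR, so for 0/1 inputs the gate is equivalent to
# 1 - (a ^ b) (a sketch, assuming the gate is bound to the xnor_gate name used
# in the calls above):
#
#   assert all(xnor_gate(a, b) == 1 - (a ^ b) for a in (0, 1) for b in (0, 1))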
| 715 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 696 | 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a_ :
'''simple docstring'''
@staticmethod
def a__ (*lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification', )
lowerCamelCase__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase__ : Optional[int] = image_classifier(lowerCamelCase_, candidate_labels=['a', 'b', 'c'] )
        # The floating-point scores are so close that we run into floating-point error, and the order is not
        # guaranteed across python and torch versions.
self.assertIn(
nested_simplify(lowerCamelCase_ ), [
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
], )
lowerCamelCase__ : int = image_classifier([image] * 5, candidate_labels=['A', 'B', 'C'], batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
], )
@require_tf
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification', framework='tf' )
lowerCamelCase__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase__ : List[Any] = image_classifier(lowerCamelCase_, candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}], )
lowerCamelCase__ : Any = image_classifier([image] * 5, candidate_labels=['A', 'B', 'C'], batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
], )
@slow
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = pipeline(
task='zero-shot-image-classification', model='openai/clip-vit-base-patch32', )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase__ : Tuple = image_classifier(lowerCamelCase_, candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
], )
lowerCamelCase__ : Optional[int] = image_classifier([image] * 5, candidate_labels=['cat', 'plane', 'remote'], batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5, )
@slow
@require_tf
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = pipeline(
task='zero-shot-image-classification', model='openai/clip-vit-base-patch32', framework='tf' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase__ : Optional[int] = image_classifier(lowerCamelCase_, candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
], )
lowerCamelCase__ : Optional[int] = image_classifier([image] * 5, candidate_labels=['cat', 'plane', 'remote'], batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5, )
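# A minimal sketch of the same pipeline outside the test harness (the model id
# matches the slow tests above; the image path is an assumption):
#
#   from transformers import pipeline
#   from PIL import Image
#   classifier = pipeline(task='zero-shot-image-classification', model='openai/clip-vit-base-patch32')
#   preds = classifier(Image.open('cats.png'), candidate_labels=['cat', 'plane', 'remote'])
#   # preds is a list of {'score': ..., 'label': ...} dicts sorted by score,
#   # matching the expected values asserted above.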
| 716 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
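# A sketch of the control loop this planner is meant to sit in (variable names
# are assumptions; the keyword names batch_size / planning_horizon /
# n_guide_steps are the original names behind the mangled __call__ parameters
# above):
#
#   obs = env.reset()
#   for _ in range(max_episode_steps):
#       action = pipeline(obs, batch_size=64, planning_horizon=32, n_guide_steps=2)
#       obs, reward, done, info = env.step(action)
#       if done:
#           break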
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Dict = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(_lowerCamelCase , max_perimeter + 1 ):
lowerCamelCase__ : Any = (base * base + perpendicular * perpendicular) ** 0.5
        if hypotenuse == int(hypotenuse ):
lowerCamelCase__ : Dict = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def lowerCamelCase_ ( _lowerCamelCase = 1000 ):
lowerCamelCase__ : int = pythagorean_triple(_lowerCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
# one length string
lowerCamelCase__ : str = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
lowerCamelCase__ : Union[str, Any] = my_str / all_sum
            my_fir_sum += prob * math.loga(prob ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            lowerCamelCase__ : int = cha + chb
            if sequence in two_char_strings:
                lowerCamelCase__ : int = two_char_strings[sequence]
                lowerCamelCase__ : Tuple = int(my_str ) / all_sum
                my_sec_sum += prob * math.loga(prob )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
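# The quantity printed by calculate_prob is Shannon entropy,
# H = -sum(p_i * log2(p_i)) over symbol probabilities p_i. Quick sanity check:
# a source emitting two symbols with equal frequency has first-order entropy
# of exactly one bit, since -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0.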
| 696 | 0 |
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = k_size // 2
    lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    lowerCamelCase__ : Tuple = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
return g
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
    lowerCamelCase__ , lowerCamelCase__ : Dict = image.shape[0], image.shape[1]
# dst image height and width
lowerCamelCase__ : Dict = height - k_size + 1
lowerCamelCase__ : str = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCamelCase__ : Any = zeros((dst_height * dst_width, k_size * k_size) )
lowerCamelCase__ : Dict = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        lowerCamelCase__ : List[Any] = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
row += 1
# turn the kernel into shape(k*k, 1)
    lowerCamelCase__ : Optional[Any] = gen_gaussian_kernel(k_size , sigma )
    lowerCamelCase__ : Any = ravel(gaussian_kernel )
    # reshape and get the dst image
    lowerCamelCase__ : List[Any] = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uinta )
return dst
if __name__ == "__main__":
# read original image
A_ : Optional[Any] = imread(r"../image_data/lena.jpg")
# turn image in gray scale value
A_ : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
A_ : List[Any] = gaussian_filter(gray, 3, sigma=1)
A_ : Any = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussianaxa)
imshow("gaussian filter with 5x5 mask", gaussianaxa)
waitKey()
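# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): building a small
# Gaussian kernel with mgrid, mirroring the formula used above, and checking
# that normalising it makes the weights sum to 1 so the filter preserves
# overall brightness. All names are local to this example.
import numpy as np

def _demo_gaussian_kernel(k_size: int, sigma: float) -> np.ndarray:
    center = k_size // 2
    x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * np.pi * sigma) * np.exp(-(np.square(x) + np.square(y)) / (2 * np.square(sigma)))
    return g / g.sum()  # normalise the kernel weights

_kernel = _demo_gaussian_kernel(3, 1.0)
assert _kernel.shape == (3, 3)
assert abs(_kernel.sum() - 1.0) < 1e-9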
| 718 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
    with open(os.path.dirname(__file__) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
    for i, name in enumerate(names ):
for letter in name:
            name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
if __name__ == "__main__":
print(solution())
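# ---------------------------------------------------------------------------
# Worked example from the Project Euler 22 statement (illustrative, not part
# of the original solution): COLIN is worth 3 + 15 + 12 + 9 + 14 = 53, and at
# position 938 in the sorted list it scores 938 * 53 = 49714.
_colin = sum(ord(ch) - 64 for ch in "COLIN")
assert _colin == 53
assert 938 * _colin == 49_714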
| 696 | 0 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A_ : Optional[Any] = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Dict = os.path.abspath(os.path.join('examples', 'by_feature' ) )
lowerCamelCase__ : List[Any] = os.path.abspath('examples' )
for item in os.listdir(lowerCamelCase_ ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase__ : Tuple = os.path.join(lowerCamelCase_, lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ) and ".py" in item_path:
with self.subTest(
tested_script=lowerCamelCase_, feature_script=lowerCamelCase_, tested_section='main()' if parser_only else 'training_function()', ):
lowerCamelCase__ : List[Any] = compare_against_test(
os.path.join(lowerCamelCase_, lowerCamelCase_ ), lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = '''\n'''.join(lowerCamelCase_ )
if special_strings is not None:
for string in special_strings:
lowerCamelCase__ : Union[str, Any] = diff.replace(lowerCamelCase_, '' )
self.assertEqual(lowerCamelCase_, '' )
def a__ (self ):
'''simple docstring'''
self.one_complete_example('complete_nlp_example.py', lowerCamelCase_ )
self.one_complete_example('complete_nlp_example.py', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = os.path.abspath(os.path.join('examples', 'cv_example.py' ) )
lowerCamelCase__ : Dict = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('complete_cv_example.py', lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
self.one_complete_example('complete_cv_example.py', lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class a_ ( UpperCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = False
@classmethod
def a__ (cls ):
'''simple docstring'''
super().setUpClass()
lowerCamelCase__ : List[Any] = tempfile.mkdtemp()
lowerCamelCase__ : Tuple = os.path.join(cls._tmpdir, 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase__ : Dict = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def a__ (cls ):
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = f'''\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0' ) ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = f'''\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '''.split()
lowerCamelCase__ : List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2' ) ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = f'''\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0' )}\n '''.split()
lowerCamelCase__ : Dict = run_command(self._launch_args + testargs, return_stdout=lowerCamelCase_ )
self.assertNotIn('epoch 0:', lowerCamelCase_ )
self.assertIn('epoch 1:', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = f'''\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2' )}\n '''.split()
lowerCamelCase__ : Dict = run_command(self._launch_args + testargs, return_stdout=lowerCamelCase_ )
if torch.cuda.is_available():
lowerCamelCase__ : List[str] = torch.cuda.device_count()
else:
lowerCamelCase__ : List[Any] = 1
if num_processes > 1:
self.assertNotIn('epoch 0:', lowerCamelCase_ )
self.assertIn('epoch 1:', lowerCamelCase_ )
else:
self.assertIn('epoch 0:', lowerCamelCase_ )
self.assertIn('epoch 1:', lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'} ):
lowerCamelCase__ : Any = run_command(self._launch_args + testargs, return_stdout=lowerCamelCase_ )
lowerCamelCase__ : Dict = re.findall('({.+})', lowerCamelCase_ )
lowerCamelCase__ : int = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase__ : List[str] = ast.literal_eval(lowerCamelCase_ )
self.assertGreaterEqual(results['accuracy'], 0.75 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'} )
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase__ : Optional[int] = f'''\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_, 'tracking' ) ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
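# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): how the launch command
# fed to run_command above is assembled. The config path here is a
# hypothetical placeholder, not a value taken from the tests.
_config_path = "/tmp/accelerate/default_config.yml"  # hypothetical path
_launch_args = ["accelerate", "launch", "--config_file", _config_path]
_testargs = "examples/by_feature/checkpointing.py --checkpointing_steps epoch".split()
assert _launch_args + _testargs == [
    "accelerate", "launch", "--config_file", _config_path,
    "examples/by_feature/checkpointing.py", "--checkpointing_steps", "epoch",
]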
| 719 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call).' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
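# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module), written against
# the real transformers class that the obfuscated processor above mirrors. The
# checkpoint name is an assumption -- any Speech2Text checkpoint with a
# matching feature extractor and tokenizer should behave the same way, and the
# feature extraction may require the speech extras to be installed.
from transformers import Speech2TextProcessor
import numpy as np

_processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
_audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
_inputs = _processor(audio=_audio, sampling_rate=16_000, text="hello world")
# __call__ merges both modalities: the extracted features plus a "labels"
# key holding the tokenized transcription.
assert "labels" in _inputs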
| 696 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class a_ ( lowercase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = '''bert-generation'''
def __init__(self, lowerCamelCase_=5_0_3_5_8, lowerCamelCase_=1_0_2_4, lowerCamelCase_=2_4, lowerCamelCase_=1_6, lowerCamelCase_=4_0_9_6, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=0, lowerCamelCase_=2, lowerCamelCase_=1, lowerCamelCase_="absolute", lowerCamelCase_=True, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Union[str, Any] = num_attention_heads
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : int = intermediate_size
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : List[Any] = layer_norm_eps
lowerCamelCase__ : Dict = position_embedding_type
lowerCamelCase__ : Optional[Any] = use_cache
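# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module), written against the
# real transformers class that the obfuscated config above mirrors:
# instantiating it with a couple of overrides and reading back its fields.
from transformers import BertGenerationConfig

_cfg = BertGenerationConfig(vocab_size=100, hidden_size=64)
assert _cfg.vocab_size == 100
assert _cfg.hidden_size == 64
assert _cfg.model_type == "bert-generation"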
| 720 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# difference of ~1, so it is not a good idea to measure with an additive tolerance.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
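# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests) of the ratio-based
# tolerance check used above: instead of |a - b| < eps, which breaks down when
# values span many orders of magnitude, check that a / b stays inside
# [1 - tol, 1 + tol]. All names are local to this example.
import torch

_tol = 1e-3
_expected = torch.tensor([1.0e8, -5.0e-1, 2.6e0])
_observed = _expected * (1 + 1e-5)  # tiny relative perturbation
_ratio = _expected / _observed
assert bool(torch.all(_ratio >= 1 - _tol) and torch.all(_ratio <= 1 + _tol))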
| 696 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase__ : str = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase__ : int = test_metrics
@require_cpu
def a__ (self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main, num_processes=1 )
@require_cpu
def a__ (self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def a__ (self ):
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def a__ (self ):
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
lowerCamelCase__ : Optional[Any] = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase_, env=os.environ.copy() )
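# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): debug_launcher runs a
# function under a simulated multi-process CPU setup, which is what the
# require_cpu tests above rely on. The toy function is local to this example.
from accelerate import debug_launcher

def _toy_fn():
    print("hello from a simulated process")

debug_launcher(_toy_fn, num_processes=2)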
| 721 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without strip(), the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
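# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script) of the truncation
# performed by remove_last_block above: split the generation on the EOF
# markers and drop everything from the last marker onwards. Names are local
# to this example.
import re

_eof = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
_gen = "    return x + 1\nprint(solution())"
_parts = re.split("(%s)" % "|".join(_eof), _gen)
assert "".join(_parts[:-2]) == "    return x + 1"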
| 696 | 0 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = f'''{sampling_rate}'''
lowerCamelCase__ : Union[str, Any] = '1'
lowerCamelCase__ : List[Any] = 'f32le'
lowerCamelCase__ : Optional[int] = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(_lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCamelCase__ : Union[str, Any] = ffmpeg_process.communicate(_lowerCamelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
lowerCamelCase__ : Optional[Any] = output_stream[0]
lowerCamelCase__ : str = np.frombuffer(_lowerCamelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "f32le" , ):
lowerCamelCase__ : Union[str, Any] = f'''{sampling_rate}'''
lowerCamelCase__ : Dict = '1'
if format_for_conversion == "s16le":
lowerCamelCase__ : Optional[int] = 2
elif format_for_conversion == "f32le":
lowerCamelCase__ : Any = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase__ : int = platform.system()
if system == "Linux":
lowerCamelCase__ : Optional[int] = 'alsa'
lowerCamelCase__ : str = 'default'
elif system == "Darwin":
lowerCamelCase__ : Any = 'avfoundation'
lowerCamelCase__ : Tuple = ':0'
elif system == "Windows":
lowerCamelCase__ : Any = 'dshow'
lowerCamelCase__ : List[Any] = 'default'
lowerCamelCase__ : Any = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
lowerCamelCase__ : Union[str, Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase__ : str = _ffmpeg_stream(_lowerCamelCase , _lowerCamelCase )
for item in iterator:
yield item
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "f32le" , ):
if stream_chunk_s is not None:
lowerCamelCase__ : Any = stream_chunk_s
else:
lowerCamelCase__ : List[str] = chunk_length_s
lowerCamelCase__ : Any = ffmpeg_microphone(_lowerCamelCase , _lowerCamelCase , format_for_conversion=_lowerCamelCase )
if format_for_conversion == "s16le":
lowerCamelCase__ : int = np.intaa
lowerCamelCase__ : int = 2
elif format_for_conversion == "f32le":
lowerCamelCase__ : Optional[Any] = np.floataa
lowerCamelCase__ : Any = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowerCamelCase__ : Optional[Any] = chunk_length_s / 6
lowerCamelCase__ : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(_lowerCamelCase , (int, float) ):
lowerCamelCase__ : List[Any] = [stride_length_s, stride_length_s]
lowerCamelCase__ : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCamelCase__ : Any = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCamelCase__ : Tuple = datetime.datetime.now()
lowerCamelCase__ : int = datetime.timedelta(seconds=_lowerCamelCase )
for item in chunk_bytes_iter(_lowerCamelCase , _lowerCamelCase , stride=(stride_left, stride_right) , stream=_lowerCamelCase ):
# Put everything back in numpy scale
lowerCamelCase__ : Optional[int] = np.frombuffer(item['raw'] , dtype=_lowerCamelCase )
lowerCamelCase__ : Tuple = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
lowerCamelCase__ : Optional[int] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
lowerCamelCase__ : Tuple = b''
lowerCamelCase__ : List[Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowerCamelCase__ : int = 0
for raw in iterator:
acc += raw
if stream and len(_lowerCamelCase ) < chunk_len:
lowerCamelCase__ : Dict = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(_lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCamelCase__ : str = (_stride_left, stride_right)
lowerCamelCase__ : List[Any] = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
lowerCamelCase__ : List[Any] = False
yield item
lowerCamelCase__ : List[Any] = stride_left
lowerCamelCase__ : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(_lowerCamelCase ) > stride_left:
lowerCamelCase__ : Optional[Any] = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
lowerCamelCase__ : Any = False
yield item
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
    lowerCamelCase__ : int = 2**24 # 16 MB
try:
with subprocess.Popen(_lowerCamelCase , stdout=subprocess.PIPE , bufsize=_lowerCamelCase ) as ffmpeg_process:
while True:
lowerCamelCase__ : Union[str, Any] = ffmpeg_process.stdout.read(_lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
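# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of the striding scheme
# implemented by chunk_bytes_iter above: consecutive chunks overlap by
# stride_left + stride_right bytes so downstream models get context on both
# sides of each chunk. This is a simplified, non-streaming re-statement of the
# same loop, with names local to this example.
def _demo_chunks(data: bytes, chunk_len: int, stride_left: int, stride_right: int):
    acc = data
    _stride_left = 0
    while len(acc) >= chunk_len:
        yield acc[:chunk_len], (_stride_left, stride_right)
        _stride_left = stride_left
        acc = acc[chunk_len - stride_left - stride_right :]
    if len(acc) > stride_left:
        yield acc, (_stride_left, 0)

_chunks = list(_demo_chunks(b"0123456789", chunk_len=6, stride_left=1, stride_right=1))
# the first chunk has no left context; later chunks carry 1 byte of overlap
assert _chunks[0] == (b"012345", (0, 1))
assert _chunks[1] == (b"456789", (1, 1))
assert _chunks[2] == (b"89", (1, 0))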
| 700 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(1_00, 0.25) = }")
print(f"{price_plus_tax(125.50, 0.05) = }")
| 701 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
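# ---------------------------------------------------------------------------
# Worked example from the Project Euler 20 statement (illustrative, not part
# of the original solution): 10! is 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
_fact_10 = 1
for _i in range(1, 11):
    _fact_10 *= _i
assert _fact_10 == 3_628_800
assert sum(int(_d) for _d in str(_fact_10)) == 27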
| 696 | 0 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
A_ : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
A_ : str = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
A_ : int = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = CamembertTokenizer
lowerCamelCase__ : Optional[Any] = CamembertTokenizerFast
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : Any = True
def a__ (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ : Optional[Any] = CamembertTokenizer(lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = '<pad>'
lowerCamelCase__ : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ), lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ), lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '<s>NOTUSED' )
self.assertEqual(vocab_keys[1], '<pad>' )
self.assertEqual(vocab_keys[-1], '<mask>' )
self.assertEqual(len(lowerCamelCase_ ), 1_0_0_4 )
def a__ (self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_5 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = CamembertTokenizer(lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
lowerCamelCase__ : List[str] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
lowerCamelCase__ : List[Any] = 'I was born in 92000, and this is falsé.'
lowerCamelCase__ : Tuple = tokenizer.encode(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Any = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = rust_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
        # <unk> tokens are not the same for `rust` as for `slow`.
        # This is because spm gives back the raw token instead of `<unk>` in EncodeAsPieces.
# tokens = tokenizer.tokenize(sequence)
lowerCamelCase__ : Tuple = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
lowerCamelCase__ : List[str] = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase__ : Dict = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_rust_tokenizer()
lowerCamelCase__ : str = 'I was born in 92000, and this is falsé.'
lowerCamelCase__ : List[str] = tokenizer.tokenize(lowerCamelCase_ )
lowerCamelCase__ : str = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = self.get_rust_tokenizer()
lowerCamelCase__ : Dict = tokenizer.encode(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = {'input_ids': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # CamemBERT is a French model, so we also use French texts.
lowerCamelCase__ : List[str] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_, model_name='camembert-base', revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf', sequences=lowerCamelCase_, )
| 702 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ : List[str] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
A_ : List[str] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
A_ : int = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence' ),
'references': datasets.Value('string', id='sequence' ),
} ), codebase_urls=['https://github.com/jitsi/jiwer/'], reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
], )
def a__ (self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(lowerCamelCase_, lowerCamelCase_ )["wer"]
else:
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Dict = 0
for prediction, reference in zip(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Union[str, Any] = compute_measures(lowerCamelCase_, lowerCamelCase_ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
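# --- Hedged sketch (not part of the metric above; `simple_wer` is an
# illustrative name): a pure-Python word-level WER using the standard
# Levenshtein recurrence, handy for sanity-checking jiwer on the docstring
# example, assuming WER = (S + D + I) / N.
def simple_wer(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = word-level edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,  # deletion
                dp[i][j - 1] + 1,  # insertion
                dp[i - 1][j - 1] + cost,  # substitution or hit
            )
    return dp[len(ref)][len(hyp)] / len(ref)

# Reproduces the 0.5 from the usage example when errors are pooled over all
# reference words (1 error out of 4 + 3 errors out of 4 = 4 / 8):
_errors = simple_wer("this is the reference", "this is the prediction") * 4
_errors += simple_wer("there is another one", "there is an other sample") * 4
assert _errors / 8 == 0.5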
| 703 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[torch.FloatTensor] = None
lowerCamelCase__ : torch.FloatTensor = None
lowerCamelCase__ : Optional[Tuple[torch.FloatTensor]] = None
lowerCamelCase__ : Optional[Tuple[torch.FloatTensor]] = None
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=2, lowerCamelCase_=5_1_2, lowerCamelCase_="cls", lowerCamelCase_=False, lowerCamelCase_=True, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : List[str] = project_dim
lowerCamelCase__ : int = pooler_fn
lowerCamelCase__ : Dict = learn_encoder
lowerCamelCase__ : Dict = use_attention_mask
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = [R'pooler', R'logit_scale']
lowerCamelCase__ : Optional[Any] = [R'position_ids', R'predictions.decoder.bias']
lowerCamelCase__ : str = 'roberta'
lowerCamelCase__ : List[str] = RobertaSeriesConfig
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = XLMRobertaModel(lowerCamelCase_ )
lowerCamelCase__ : List[str] = nn.Linear(config.hidden_size, config.project_dim )
lowerCamelCase__ : int = getattr(lowerCamelCase_, 'has_pre_transformation', lowerCamelCase_ )
if self.has_pre_transformation:
lowerCamelCase__ : int = nn.Linear(config.hidden_size, config.project_dim )
lowerCamelCase__ : Optional[int] = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps )
self.post_init()
def a__ (self, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ : Optional[int] = self.base_model(
input_ids=lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, position_ids=lowerCamelCase_, head_mask=lowerCamelCase_, inputs_embeds=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, encoder_attention_mask=lowerCamelCase_, output_attentions=lowerCamelCase_, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=lowerCamelCase_, )
if self.has_pre_transformation:
lowerCamelCase__ : Any = outputs['hidden_states'][-2]
lowerCamelCase__ : int = self.pre_LN(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = self.transformation_pre(lowerCamelCase_ )
return TransformationModelOutput(
projection_state=lowerCamelCase_, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
else:
lowerCamelCase__ : List[str] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=lowerCamelCase_, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
| 704 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
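# --- Hedged sanity check (self-contained; assumes the well-known Project
# Euler #20 result that the digits of 100! sum to 648):
from math import factorial as _factorial

assert sum(int(digit) for digit in str(_factorial(100))) == 648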
| 705 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
            lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
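# --- Minimal, self-contained illustration of the checkpoint-link regex used
# above (`_ckpt_re` is an illustrative name; the pattern and example string
# are copied from the definition and comment near the top of this file):
import re as _re

_ckpt_re = _re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
_demo = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
assert _ckpt_re.findall(_demo) == [
    ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
]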
| 696 | 0 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = np.max(_lowerCamelCase , axis=-1 , keepdims=_lowerCamelCase )
lowerCamelCase__ : List[Any] = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_lowerCamelCase )
class a_ ( snake_case_ ):
'''simple docstring'''
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = {}
if "second_text" in kwargs:
lowerCamelCase__ : Tuple = kwargs['second_text']
return preprocess_kwargs, {}, {}
def a__ (self, lowerCamelCase_, lowerCamelCase_=None ):
'''simple docstring'''
return self.tokenizer(lowerCamelCase_, text_pair=lowerCamelCase_, return_tensors=self.framework )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.model(**lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = model_outputs.logits[0].numpy()
lowerCamelCase__ : List[str] = softmax(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = np.argmax(lowerCamelCase_ )
lowerCamelCase__ : str = self.model.config.idalabel[best_class]
lowerCamelCase__ : List[str] = probabilities[best_class].item()
lowerCamelCase__ : Union[str, Any] = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
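# --- Hedged sketch (illustrative only, not transformers' actual _LazyModule
# implementation): the core idea behind the lazy import structure above is a
# ModuleType subclass that defers each submodule import until an attribute
# from it is first accessed.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                # Import the real submodule only now, on first access.
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")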
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
'''simple docstring'''
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 0 |
"""simple docstring"""
import math
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = f'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowerCamelCase )
if number < 1:
lowerCamelCase__ : Optional[int] = f'''Input value of [number={number}] must be > 0'''
raise ValueError(_lowerCamelCase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
lowerCamelCase__ : Any = int(math.log(number // 3 , 2 ) ) + 2
lowerCamelCase__ : List[Any] = [3, 5]
lowerCamelCase__ : List[Any] = 2
lowerCamelCase__ : List[Any] = 3
for block in range(1 , _lowerCamelCase ):
for _ in range(_lowerCamelCase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
A_ : Dict = 0
try:
A_ : Any = proth(number)
except ValueError:
print(f"ValueError: there is no {number}th Proth number")
continue
print(f"The {number}th Proth number: {value}")
| 708 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
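# --- Hedged sketch (assumes JAX and Flax are installed; shapes are
# illustrative): the replicate/shard pattern the tests above use to split a
# global batch and RNG across the local devices before a jitted call.
import jax as _jax
import jax.numpy as _jnp
from flax.jax_utils import replicate as _replicate
from flax.training.common_utils import shard as _shard

_num_devices = _jax.device_count()
_rngs = _jax.random.split(_jax.random.PRNGKey(0), _num_devices)  # one RNG per device
_batch = _jnp.zeros((_num_devices * 2, 8), dtype=_jnp.int32)  # global batch
_sharded = _shard(_batch)  # leading dim becomes (devices, per-device batch)
assert _sharded.shape == (_num_devices, 2, 8)

_params = _replicate({"w": _jnp.ones((4,))})  # each leaf gains a device axis
assert _params["w"].shape == (_num_devices, 4)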
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : List[str] = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ : str = False
lowerCamelCase__ : List[Any] = False
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : int = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : str = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa )
return inputs_dict
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=3_2, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : str = seq_length
lowerCamelCase__ : int = is_training
lowerCamelCase__ : Tuple = use_input_mask
lowerCamelCase__ : List[str] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : List[Any] = vocab_size
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : int = num_attention_heads
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : Tuple = hidden_act
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] = max_position_embeddings
lowerCamelCase__ : int = type_vocab_size
lowerCamelCase__ : int = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Any = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Tuple = embedding_size
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : int = None
if self.use_token_type_ids:
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Dict = None
lowerCamelCase__ : List[Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : Tuple = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TFMobileBertModel(config=lowerCamelCase_ )
lowerCamelCase__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : List[str] = model(lowerCamelCase_ )
lowerCamelCase__ : Dict = [input_ids, input_mask]
lowerCamelCase__ : int = model(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = TFMobileBertForMaskedLM(config=lowerCamelCase_ )
lowerCamelCase__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = TFMobileBertForNextSentencePrediction(config=lowerCamelCase_ )
lowerCamelCase__ : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = TFMobileBertForPreTraining(config=lowerCamelCase_ )
lowerCamelCase__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.num_labels
lowerCamelCase__ : Union[str, Any] = TFMobileBertForSequenceClassification(config=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_choices
lowerCamelCase__ : List[Any] = TFMobileBertForMultipleChoice(config=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = tf.tile(tf.expand_dims(lowerCamelCase_, 1 ), (1, self.num_choices, 1) )
lowerCamelCase__ : int = tf.tile(tf.expand_dims(lowerCamelCase_, 1 ), (1, self.num_choices, 1) )
lowerCamelCase__ : Dict = tf.tile(tf.expand_dims(lowerCamelCase_, 1 ), (1, self.num_choices, 1) )
lowerCamelCase__ : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = TFMobileBertForTokenClassification(config=lowerCamelCase_ )
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = TFMobileBertForQuestionAnswering(config=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFMobileBertModelTest.TFMobileBertModelTester(self )
lowerCamelCase__ : Optional[Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
lowerCamelCase__ : str = TFMobileBertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
lowerCamelCase__ : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase__ : List[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Union[str, Any] = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : List[Any] = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3], lowerCamelCase_, atol=1e-4 )
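# Note (a sketch, not part of the original test): with atol=1e-4 the
# tf.debugging.assert_near call above is roughly equivalent to a numpy
# closeness check on the top-left corner of the prediction logits, where
# expected_slice is an illustrative name for the constant tensor built above:
#
#   import numpy as np
#   np.testing.assert_allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4)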
| 709 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : str = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase )
lowerCamelCase__ : Any = checkpoints.load_tax_checkpoint(_lowerCamelCase )
lowerCamelCase__ : Dict = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
lowerCamelCase__ : Union[str, Any] = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowerCamelCase__ : Optional[int] = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : str = 'TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global\'].' )
# Encoder
for layer_index in range(config.num_layers ):
lowerCamelCase__ : Any = f'''layers_{str(_lowerCamelCase )}'''
# Self-Attention
lowerCamelCase__ : Tuple = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
lowerCamelCase__ : str = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
lowerCamelCase__ : int = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
lowerCamelCase__ : Union[str, Any] = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Union[str, Any] = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
lowerCamelCase__ : List[str] = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
lowerCamelCase__ : Tuple = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
lowerCamelCase__ : List[Any] = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
lowerCamelCase__ : Tuple = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
lowerCamelCase__ : Any = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
lowerCamelCase__ : Dict = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
lowerCamelCase__ : Union[str, Any] = flax_model.params['encoder']['block'][str(_lowerCamelCase )]['layer']
lowerCamelCase__ : Optional[int] = tax_attention_key
lowerCamelCase__ : Dict = tax_attention_out
lowerCamelCase__ : Union[str, Any] = tax_attention_query
lowerCamelCase__ : Dict = tax_attention_value
lowerCamelCase__ : str = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Optional[int] = tax_global_layer_norm
if split_mlp_wi:
lowerCamelCase__ : Tuple = tax_mlp_wi_a
lowerCamelCase__ : Optional[Any] = tax_mlp_wi_a
else:
lowerCamelCase__ : str = tax_mlp_wi
lowerCamelCase__ : str = tax_mlp_wo
lowerCamelCase__ : Tuple = tax_mlp_layer_norm
lowerCamelCase__ : int = flax_model_encoder_layer_block
# Only for layer 0:
lowerCamelCase__ : Optional[int] = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
lowerCamelCase__ : Any = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Optional[Any] = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
lowerCamelCase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowerCamelCase__ : str = tax_model['target']['encoder']['encoder_norm']['scale']
lowerCamelCase__ : str = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
lowerCamelCase__ : Optional[int] = f'''layers_{str(_lowerCamelCase )}'''
# Self-Attention
lowerCamelCase__ : Dict = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
lowerCamelCase__ : Dict = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
lowerCamelCase__ : List[Any] = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
lowerCamelCase__ : str = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
lowerCamelCase__ : Union[str, Any] = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
lowerCamelCase__ : Tuple = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
lowerCamelCase__ : int = tax_enc_dec_attention_module['key']['kernel']
lowerCamelCase__ : Dict = tax_enc_dec_attention_module['out']['kernel']
lowerCamelCase__ : int = tax_enc_dec_attention_module['query']['kernel']
lowerCamelCase__ : int = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
lowerCamelCase__ : Dict = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
lowerCamelCase__ : Dict = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
lowerCamelCase__ : int = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
lowerCamelCase__ : str = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
lowerCamelCase__ : Any = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
lowerCamelCase__ : Optional[int] = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
lowerCamelCase__ : Union[str, Any] = flax_model.params['decoder']['block'][str(_lowerCamelCase )]['layer']
lowerCamelCase__ : List[str] = tax_attention_key
lowerCamelCase__ : int = tax_attention_out
lowerCamelCase__ : Optional[Any] = tax_attention_query
lowerCamelCase__ : Union[str, Any] = tax_attention_value
lowerCamelCase__ : Union[str, Any] = tax_pre_attention_layer_norm
lowerCamelCase__ : Optional[int] = tax_enc_dec_attention_key
lowerCamelCase__ : str = tax_enc_dec_attention_out
lowerCamelCase__ : Union[str, Any] = tax_enc_dec_attention_query
lowerCamelCase__ : Dict = tax_enc_dec_attention_value
lowerCamelCase__ : List[str] = tax_cross_layer_norm
if split_mlp_wi:
lowerCamelCase__ : Optional[int] = tax_mlp_wi_a
lowerCamelCase__ : Union[str, Any] = tax_mlp_wi_a
else:
lowerCamelCase__ : Tuple = tax_mlp_wi
lowerCamelCase__ : Dict = tax_mlp_wo
lowerCamelCase__ : Any = tax_mlp_layer_norm
lowerCamelCase__ : str = flax_model_decoder_layer_block
# Decoder Normalization
lowerCamelCase__ : str = tax_model['target']['decoder']['decoder_norm']['scale']
lowerCamelCase__ : Dict = tax_decoder_norm
# Only for layer 0:
lowerCamelCase__ : List[str] = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
lowerCamelCase__ : Dict = tax_decoder_rel_embedding
# Token Embeddings
lowerCamelCase__ : List[Any] = tax_model['target']['token_embedder']['embedding']
lowerCamelCase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowerCamelCase__ : List[Any] = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(_lowerCamelCase )
print('T5X Model was sucessfully converted!' )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
A_ : int = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
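# Example invocation of the converter above (hypothetical paths, shown for
# illustration only; the three flags are the required arguments declared in
# the argparse block):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoints/t5_small \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path /tmp/flax_t5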
| 710 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
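# Usage sketch (assumption: convert takes framework, model, output path, opset
# and tokenizer in that order, mirroring how the _test_export helper above
# forwards its arguments; not a verbatim excerpt of the library API):
#
#   from pathlib import Path
#   from transformers.convert_graph_to_onnx import convert
#   convert("pt", "bert-base-cased", Path("onnx/bert-base-cased.onnx"), 12, None)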
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ): # noqa: E741
'''simple docstring'''
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : int = 0
lowerCamelCase__ : Any = [0] * n
lowerCamelCase__ : str = [False] * n
lowerCamelCase__ : Optional[int] = [False] * n
def dfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if parent == root:
out_edge_count += 1
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : Any = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
lowerCamelCase__ : str = dfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : str = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
lowerCamelCase__ : Union[str, Any] = True
# AP found via cycle
if at == low[to]:
lowerCamelCase__ : List[str] = True
else:
lowerCamelCase__ : Dict = min(low[at] , _lowerCamelCase )
return out_edge_count
for i in range(_lowerCamelCase ):
if not visited[i]:
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = dfs(_lowerCamelCase , _lowerCamelCase , -1 , _lowerCamelCase )
lowerCamelCase__ : int = out_edge_count > 1
for x in range(len(_lowerCamelCase ) ):
if is_art[x] is True:
print(_lowerCamelCase )
# Adjacency list of graph
A_ : Any = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
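# Expected output for the sample graph above: removing vertex 2, 3 or 5
# disconnects the graph, so the articulation points printed are 2, 3 and 5.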
| 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
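# Note (assumption about the shared test helper, not code from this file):
# assert_mean_pixel_difference converts both images to float arrays and fails
# only if the mean absolute per-pixel difference exceeds a small threshold,
# a deliberately looser check than an element-wise allclose for GPU runs.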
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = 0.00
lowerCamelCase__ : int = 0
for resistor in resistors:
if resistor <= 0:
lowerCamelCase__ : Union[str, Any] = f'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(_lowerCamelCase )
first_sum += 1 / float(_lowerCamelCase )
index += 1
return 1 / first_sum
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : str = 0.00
lowerCamelCase__ : Optional[int] = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowerCamelCase__ : Optional[int] = f'''Resistor at index {index} has a negative value!'''
raise ValueError(_lowerCamelCase )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
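# Worked example (values chosen for easy arithmetic): for resistors [2, 4, 4]
# the parallel combination is 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm, while the
# series combination is simply 2 + 4 + 4 = 10.0 ohms.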
| 712 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
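# A minimal sketch (an assumption mirroring the deps_list helper pattern used
# in setup.py files, not part of the original table) of how a pinned-version
# table like A_ is typically consumed:
def deps_list(*pkgs):
    # Look up the pinned version specifier for each requested package name.
    return [A_[pkg] for pkg in pkgs]


# e.g. install_requires = deps_list("numpy", "packaging", "requests", "tqdm")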
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
)
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[str] = [90, 23, 6, 33, 21, 65, 123, 3_4423]
lowerCamelCase__ : List[Any] = math.log(len(_lowerCamelCase ) , 2 )
print(f'''Optimal value : {minimax(0 , 0 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
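# Worked trace for the sample scores: with 8 leaves the height is log2(8) = 3,
# and the alternating max/min levels collapse to
# max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423))) = 65,
# so the script prints "Optimal value : 65".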
| 713 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
if group not in primes:
raise ValueError('Unsupported Group' )
lowerCamelCase__ : int = primes[group]['prime']
lowerCamelCase__ : Optional[int] = primes[group]['generator']
lowerCamelCase__ : Any = int(hexlify(urandom(3_2 ) ), base=1_6 )
def a__ (self ):
'''simple docstring'''
return hex(self.__private_key )[2:]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = pow(self.generator, self.__private_key, self.prime )
return hex(lowerCamelCase_ )[2:]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase_, (self.prime - 1) // 2, self.prime ) == 1
)
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = int(lowerCamelCase_, base=1_6 )
if not self.is_valid_public_key(lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Tuple = pow(lowerCamelCase_, self.__private_key, self.prime )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase_, (prime - 1) // 2, lowerCamelCase_ ) == 1
)
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
lowerCamelCase__ : Dict = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[Any] = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[str] = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase_, lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Dict = pow(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
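# Usage sketch (assumption: method names as in the upstream version of this
# class -- get_private_key / generate_public_key / generate_shared_key; a
# fresh key pair is generated per instance):
#
#   alice = DiffieHellman(group=14)
#   bob = DiffieHellman(group=14)
#   shared_a = alice.generate_shared_key(bob.generate_public_key())
#   shared_b = bob.generate_shared_key(alice.generate_public_key())
#   assert shared_a == shared_b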
| 696 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
| 714 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
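# Worked example: a 10 kg body moving at 10 m/s carries
# 0.5 * 10 * |10| * |10| = 500.0 joules of kinetic energy.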
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = False ):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
lowerCamelCase__ : Dict = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
lowerCamelCase__ : str = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
lowerCamelCase__ : Any = primes[:idx]
break
lowerCamelCase__ , lowerCamelCase__ : int = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCamelCase__ : Optional[Any] = False
for r in range(_lowerCamelCase ):
lowerCamelCase__ : Any = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCamelCase__ : Dict = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def lowerCamelCase_ ( ):
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
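# Quick reference: miller_rabin(561) is False -- 561 = 3 * 11 * 17 is a
# Carmichael number that fools the plain Fermat test -- while miller_rabin(563)
# is True; both cases are exercised by the test function above.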
| 715 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check ensures we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
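        # Note (hedged, not part of the original record): under the hood, from_pretrained scans the
        # folder for config.<version>.json files and picks the most recent one whose version does
        # not exceed the installed transformers version.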
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 696 | 0 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCamelCase_ ( _lowerCamelCase ):
return np.dot(_lowerCamelCase , _lowerCamelCase )
class a_ :
'''simple docstring'''
def __init__(self, *,
lowerCamelCase_ = np.inf, lowerCamelCase_ = "linear", lowerCamelCase_ = 0.0, ):
'''simple docstring'''
lowerCamelCase__ : Any = regularization
lowerCamelCase__ : Optional[int] = gamma
if kernel == "linear":
lowerCamelCase__ : List[str] = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma' )
if not isinstance(self.gamma, (float, int) ):
raise ValueError('gamma must be float or int' )
if not self.gamma > 0:
raise ValueError('gamma must be > 0' )
lowerCamelCase__ : List[str] = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
lowerCamelCase__ : List[Any] = f'''Unknown kernel: {kernel}'''
raise ValueError(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return np.dot(lowerCamelCase_, lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
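        # Gaussian (RBF) kernel: K(x, y) = exp(-gamma * ||x - y||**2)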
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = observations
lowerCamelCase__ : Union[str, Any] = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (lowerCamelCase__ ,) : List[Any] = np.shape(lowerCamelCase_ )
def to_minimize(lowerCamelCase_ ) -> float:
lowerCamelCase__ : Union[str, Any] = 0
            (lowerCamelCase__ ,) : Tuple = np.shape(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i], observations[j] )
)
return 1 / 2 * s - sum(lowerCamelCase_ )
lowerCamelCase__ : Dict = LinearConstraint(lowerCamelCase_, 0, 0 )
lowerCamelCase__ : int = Bounds(0, self.regularization )
lowerCamelCase__ : int = minimize(
lowerCamelCase_, np.ones(lowerCamelCase_ ), bounds=lowerCamelCase_, constraints=[ly_contraint] ).x
lowerCamelCase__ : Union[str, Any] = l_star
# calculating mean offset of separation plane to points
lowerCamelCase__ : int = 0
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i], observations[j] )
lowerCamelCase__ : Optional[Any] = s / n
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n], lowerCamelCase_ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
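# Hedged usage sketch (illustrative; the class and method names in this record are
# obfuscated placeholders for SVC.fit / SVC.predict): fit a linear SVC on two
# separable points, then classify a new observation.
#
#   svc = SVC(kernel="linear")
#   svc.fit(observations=[np.array([1.0, 1.0]), np.array([-1.0, -1.0])],
#           classes=np.array([1, -1]))
#   svc.predict(np.array([2.0, 2.0]))  # expected: 1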
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
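                # value-guided ascent: the gradient of the value function w.r.t. x is scaled by the
                # model's posterior std (sigma_t) before being added to x below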
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = False
while is_sorted is False: # Until all the indices are traversed keep looping
lowerCamelCase__ : Dict = True
for i in range(0 , len(_lowerCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
            input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowerCamelCase__ : List[Any] = False
for i in range(1 , len(_lowerCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
            input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowerCamelCase__ : Optional[int] = False
return input_list
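# Illustrative run (with the subscript swaps above): odd_even_sort([5, 3, 1, 2]) returns [1, 2, 3, 5].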
if __name__ == "__main__":
print("Enter list to be sorted")
A_ : Any = [int(x) for x in input().split()]
# inputing elements of the list in one line
A_ : Tuple = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
# one length string
lowerCamelCase__ : str = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
lowerCamelCase__ : Union[str, Any] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowerCamelCase ) # entropy formula.
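    # Shannon entropy: H = -sum(p * log2(p)); the accumulated sum is negated when printed below.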
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
        for chb in my_alphas:
            lowerCamelCase__ : int = cha + chb
if sequence in two_char_strings:
lowerCamelCase__ : int = two_char_strings[sequence]
lowerCamelCase__ : Tuple = int(_lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(_lowerCamelCase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # first case: treat the text as if it starts with a space.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ : List[Any] = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = ["OwlViTFeatureExtractor"]
A_ : List[str] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
with open(os.path.dirname(_lowerCamelCase ) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
name_score += ord(_lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
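# Worked example from the problem statement: COLIN scores 3 + 15 + 12 + 9 + 14 = 53
# and, at position 938 in the sorted list, contributes 938 * 53 = 49714.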
if __name__ == "__main__":
print(solution())
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
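    # Hedged usage sketch (checkpoint name assumed, not part of the original record): a single
    # call routes `audio` through the feature extractor and `text` through the tokenizer,
    # attaching the token ids as labels on the returned features.
    #
    #   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    #   batch = processor(audio=waveform, sampling_rate=16_000, text="a transcript")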
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.)' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
| 696 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : torch.FloatTensor
lowerCamelCase__ : Optional[torch.FloatTensor] = None
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=0.999 , _lowerCamelCase="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCamelCase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCamelCase ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCamelCase__ : Optional[Any] = []
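    # discretized schedule: beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)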
for i in range(_lowerCamelCase ):
lowerCamelCase__ : int = i / num_diffusion_timesteps
lowerCamelCase__ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) )
return torch.tensor(_lowerCamelCase , dtype=torch.floataa )
class a_ ( snake_case_ , snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 1
@register_to_config
def __init__(self, lowerCamelCase_ = 1_0_0_0, lowerCamelCase_ = 0.0_001, lowerCamelCase_ = 0.02, lowerCamelCase_ = "linear", lowerCamelCase_ = None, lowerCamelCase_ = True, lowerCamelCase_ = True, lowerCamelCase_ = 0, lowerCamelCase_ = "epsilon", lowerCamelCase_ = 1.0, **lowerCamelCase_, ):
'''simple docstring'''
if kwargs.get('set_alpha_to_one', lowerCamelCase_ ) is not None:
lowerCamelCase__ : Union[str, Any] = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one', '1.0.0', lowerCamelCase_, standard_warn=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs['set_alpha_to_one']
if trained_betas is not None:
lowerCamelCase__ : Union[str, Any] = torch.tensor(lowerCamelCase_, dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCamelCase__ : Dict = torch.linspace(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCamelCase__ : Optional[int] = (
torch.linspace(beta_start**0.5, beta_end**0.5, lowerCamelCase_, dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCamelCase__ : Dict = betas_for_alpha_bar(lowerCamelCase_ )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCamelCase__ : Tuple = 1.0 - self.betas
lowerCamelCase__ : Optional[Any] = torch.cumprod(self.alphas, dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
lowerCamelCase__ : Tuple = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
lowerCamelCase__ : List[Any] = 1.0
# setable values
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : str = torch.from_numpy(np.arange(0, lowerCamelCase_ ).copy().astype(np.intaa ) )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
return sample
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
lowerCamelCase__ : List[str] = num_inference_steps
lowerCamelCase__ : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCamelCase__ : List[str] = (np.arange(0, lowerCamelCase_ ) * step_ratio).round().copy().astype(np.intaa )
lowerCamelCase__ : Dict = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ )
self.timesteps += self.config.steps_offset
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 0.0, lowerCamelCase_ = False, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
lowerCamelCase__ : str = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
lowerCamelCase__ : Union[str, Any] = self.alphas_cumprod[timestep]
lowerCamelCase__ : Tuple = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
lowerCamelCase__ : Dict = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
lowerCamelCase__ : str = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
lowerCamelCase__ : int = model_output
elif self.config.prediction_type == "sample":
lowerCamelCase__ : Optional[int] = model_output
lowerCamelCase__ : int = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
lowerCamelCase__ : List[str] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
lowerCamelCase__ : Dict = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
' `v_prediction`' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
lowerCamelCase__ : Tuple = pred_original_sample.clamp(
-self.config.clip_sample_range, self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase__ : Optional[Any] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase__ : Optional[int] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCamelCase_, pred_original_sample=lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self.config.num_train_timesteps
| 720 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
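        # e.g. expected 1.0e8 vs. output 1.0001e8 gives a ratio of ~0.9999, inside 1 +/- 1e-3.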
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 696 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = tempfile.mkdtemp()
# fmt: off
lowerCamelCase__ : int = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCamelCase__ : Union[str, Any] = dict(zip(lowerCamelCase_, range(len(lowerCamelCase_ ) ) ) )
lowerCamelCase__ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCamelCase__ : str = {'unk_token': '<unk>'}
lowerCamelCase__ : Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ : str = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = {
'do_resize': True,
'size': 2_0,
'do_center_crop': True,
'crop_size': 1_8,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname, lowerCamelCase_ )
with open(self.image_processor_file, 'w', encoding='utf-8' ) as fp:
json.dump(lowerCamelCase_, lowerCamelCase_ )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uinta )]
lowerCamelCase__ : Tuple = [Image.fromarray(np.moveaxis(lowerCamelCase_, 0, -1 ) ) for x in image_inputs]
return image_inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
lowerCamelCase__ : Optional[int] = self.get_rust_tokenizer()
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : Dict = CLIPProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = CLIPProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCamelCase_ )
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : List[Any] = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)' )
lowerCamelCase__ : Optional[Any] = self.get_image_processor(do_normalize=lowerCamelCase_, padding_value=1.0 )
lowerCamelCase__ : int = CLIPProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCamelCase_, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : Any = self.get_tokenizer()
lowerCamelCase__ : List[str] = CLIPProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = self.prepare_image_inputs()
lowerCamelCase__ : int = image_processor(lowerCamelCase_, return_tensors='np' )
lowerCamelCase__ : Optional[Any] = processor(images=lowerCamelCase_, return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : str = self.get_tokenizer()
lowerCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = 'lower newer'
lowerCamelCase__ : Optional[int] = processor(text=lowerCamelCase_ )
lowerCamelCase__ : Dict = tokenizer(lowerCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_image_processor()
lowerCamelCase__ : Tuple = self.get_tokenizer()
lowerCamelCase__ : List[Any] = CLIPProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
lowerCamelCase__ : str = 'lower newer'
lowerCamelCase__ : Optional[Any] = self.prepare_image_inputs()
lowerCamelCase__ : Tuple = processor(text=lowerCamelCase_, images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ), ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.get_image_processor()
lowerCamelCase__ : Optional[int] = self.get_tokenizer()
lowerCamelCase__ : Tuple = CLIPProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Union[str, Any] = processor.batch_decode(lowerCamelCase_ )
lowerCamelCase__ : Tuple = tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_image_processor()
lowerCamelCase__ : str = self.get_tokenizer()
lowerCamelCase__ : int = CLIPProcessor(tokenizer=lowerCamelCase_, image_processor=lowerCamelCase_ )
lowerCamelCase__ : str = 'lower newer'
lowerCamelCase__ : List[Any] = self.prepare_image_inputs()
lowerCamelCase__ : Dict = processor(text=lowerCamelCase_, images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
| 721 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
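# Note: each task prompt is yielded n_copies times above; combined with
# num_return_sequences=args.batch_size in generate(), this produces
# n_copies * batch_size candidate completions per HumanEval task.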
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
    # do not confuse args.batch_size, which is actually num_return_sequences per prompt (the DataLoader batch size is 1)
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
A_ : Tuple = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['input_features', 'attention_mask']
def __init__(self, lowerCamelCase_=8_0, lowerCamelCase_=1_6_0_0_0, lowerCamelCase_=0.0, lowerCamelCase_=1_0, lowerCamelCase_=2_5, lowerCamelCase_="hamming_window", lowerCamelCase_=3_2_7_6_8.0, lowerCamelCase_=0.97, lowerCamelCase_=1.0, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=False, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(feature_size=lowerCamelCase_, sampling_rate=lowerCamelCase_, padding_value=lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : List[str] = feature_size
lowerCamelCase__ : Union[str, Any] = sampling_rate
lowerCamelCase__ : List[str] = padding_value
lowerCamelCase__ : Optional[int] = hop_length
lowerCamelCase__ : List[Any] = win_length
lowerCamelCase__ : Tuple = frame_signal_scale
lowerCamelCase__ : Tuple = preemphasis_coeff
lowerCamelCase__ : Optional[Any] = mel_floor
lowerCamelCase__ : str = normalize_means
lowerCamelCase__ : Dict = normalize_vars
lowerCamelCase__ : List[str] = win_function
lowerCamelCase__ : Optional[Any] = return_attention_mask
lowerCamelCase__ : int = win_length * sampling_rate // 1_0_0_0
lowerCamelCase__ : Any = hop_length * sampling_rate // 1_0_0_0
lowerCamelCase__ : List[str] = optimal_fft_length(self.sample_size )
lowerCamelCase__ : str = (self.n_fft // 2) + 1
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if self.win_function == "hamming_window":
lowerCamelCase__ : str = window_function(window_length=self.sample_size, name=self.win_function, periodic=lowerCamelCase_ )
else:
lowerCamelCase__ : str = window_function(window_length=self.sample_size, name=self.win_function )
lowerCamelCase__ : List[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
lowerCamelCase__ : List[Any] = spectrogram(
one_waveform * self.frame_signal_scale, window=lowerCamelCase_, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=lowerCamelCase_, preemphasis=self.preemphasis_coeff, mel_filters=lowerCamelCase_, mel_floor=self.mel_floor, log_mel='log', )
return msfc_features.T
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
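        # Per-utterance mean/variance normalization (CMVN), computed over the
        # unpadded frames only; padded positions are reset to padding_value below.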
if self.normalize_means:
lowerCamelCase__ : Tuple = x[:input_length].mean(axis=0 )
lowerCamelCase__ : Optional[int] = np.subtract(lowerCamelCase_, lowerCamelCase_ )
if self.normalize_vars:
lowerCamelCase__ : Dict = x[:input_length].std(axis=0 )
lowerCamelCase__ : Dict = np.divide(lowerCamelCase_, lowerCamelCase_ )
if input_length < x.shape[0]:
lowerCamelCase__ : Optional[Any] = padding_value
# make sure array is in float32
lowerCamelCase__ : int = x.astype(np.floataa )
return x
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(lowerCamelCase_, lowerCamelCase_, self.padding_value ) for x, n in zip(lowerCamelCase_, lowerCamelCase_ )]
def __call__(self, lowerCamelCase_, lowerCamelCase_ = False, lowerCamelCase_ = None, lowerCamelCase_ = False, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCamelCase__ : List[Any] = isinstance(lowerCamelCase_, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCamelCase__ : int = is_batched_numpy or (
isinstance(lowerCamelCase_, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase__ : int = [np.asarray(lowerCamelCase_, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_, np.ndarray ):
lowerCamelCase__ : Tuple = np.asarray(lowerCamelCase_, dtype=np.floataa )
elif isinstance(lowerCamelCase_, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase__ : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase__ : str = [raw_speech]
# extract fbank features
lowerCamelCase__ : Dict = [self._extract_mfsc_features(lowerCamelCase_ ) for one_waveform in raw_speech]
# convert into correct format for padding
lowerCamelCase__ : List[str] = BatchFeature({'input_features': features} )
lowerCamelCase__ : Any = self.pad(
lowerCamelCase_, padding=lowerCamelCase_, max_length=lowerCamelCase_, truncation=lowerCamelCase_, pad_to_multiple_of=lowerCamelCase_, return_attention_mask=lowerCamelCase_, **lowerCamelCase_, )
# make sure list is in array format
lowerCamelCase__ : int = padded_inputs.get('input_features' )
if isinstance(input_features[0], lowerCamelCase_ ):
lowerCamelCase__ : Dict = [np.asarray(lowerCamelCase_, dtype=np.floataa ) for feature in input_features]
lowerCamelCase__ : Any = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowerCamelCase__ : int = [np.asarray(lowerCamelCase_, dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowerCamelCase__ : int = (
np.array(lowerCamelCase_, dtype=np.intaa )
if self._get_padding_strategies(lowerCamelCase_, max_length=lowerCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowerCamelCase__ : Tuple = self.normalize(
padded_inputs['input_features'], attention_mask=lowerCamelCase_ )
if return_tensors is not None:
lowerCamelCase__ : str = padded_inputs.convert_to_tensors(lowerCamelCase_ )
return padded_inputs
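# --- Minimal usage sketch (illustrative; keyword names follow the upstream
# feature-extractor API: raw_speech, padding, sampling_rate, return_tensors) ---
def _demo_extract(feature_extractor):
    import numpy as np
    # One second of silence at the default 16 kHz sampling rate.
    waveform = np.zeros(16_000, dtype=np.float32)
    features = feature_extractor(waveform, padding=True, sampling_rate=16_000, return_tensors='np')
    # Shape: (batch=1, num_frames, feature_size) mel-filterbank frames.
    print(features['input_features'].shape)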
| 700 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
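# These placeholders mirror the real classes when the 'speech' backend is not
# installed: instantiating one calls requires_backends, which raises an
# ImportError pointing the user at the missing dependency.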
| 696 | 0 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
A_ : Optional[int] = logging.get_logger(__name__)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = question_encoder
lowerCamelCase__ : Optional[int] = generator
lowerCamelCase__ : Optional[Any] = self.question_encoder
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if os.path.isfile(lowerCamelCase_ ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(lowerCamelCase_, exist_ok=lowerCamelCase_ )
lowerCamelCase__ : str = os.path.join(lowerCamelCase_, 'question_encoder_tokenizer' )
lowerCamelCase__ : str = os.path.join(lowerCamelCase_, 'generator_tokenizer' )
self.question_encoder.save_pretrained(lowerCamelCase_ )
self.generator.save_pretrained(lowerCamelCase_ )
@classmethod
def a__ (cls, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
lowerCamelCase__ : List[Any] = kwargs.pop('config', lowerCamelCase_ )
if config is None:
lowerCamelCase__ : int = RagConfig.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(
lowerCamelCase_, config=config.question_encoder, subfolder='question_encoder_tokenizer' )
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(
lowerCamelCase_, config=config.generator, subfolder='generator_tokenizer' )
return cls(question_encoder=lowerCamelCase_, generator=lowerCamelCase_ )
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.current_tokenizer(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.generator.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.generator.decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.question_encoder
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.generator
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = "longest", lowerCamelCase_ = None, lowerCamelCase_ = True, **lowerCamelCase_, ):
'''simple docstring'''
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details', lowerCamelCase_, )
if max_length is None:
lowerCamelCase__ : Dict = self.current_tokenizer.model_max_length
lowerCamelCase__ : Union[str, Any] = self(
lowerCamelCase_, add_special_tokens=lowerCamelCase_, return_tensors=lowerCamelCase_, max_length=lowerCamelCase_, padding=lowerCamelCase_, truncation=lowerCamelCase_, **lowerCamelCase_, )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCamelCase__ : str = self.current_tokenizer.model_max_length
lowerCamelCase__ : Union[str, Any] = self(
text_target=lowerCamelCase_, add_special_tokens=lowerCamelCase_, return_tensors=lowerCamelCase_, padding=lowerCamelCase_, max_length=lowerCamelCase_, truncation=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : Tuple = labels['input_ids']
return model_inputs
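# --- Illustrative usage sketch (hedged) ---
# 'facebook/rag-token-base' is the public RAG checkpoint; from_pretrained above
# loads the DPR question-encoder tokenizer and the BART generator tokenizer
# from its subfolders.
def _demo_rag_tokenizer():
    rag_tokenizer = a_.from_pretrained('facebook/rag-token-base')
    batch = rag_tokenizer(['who wrote hamlet?'], return_tensors='pt')
    # __call__ dispatches to the current tokenizer (the question encoder by default).
    print(batch['input_ids'].shape)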
| 701 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
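# Worked example: 10! = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.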
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 | 0 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : str = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
lowerCamelCase__ : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowerCamelCase__ : List[str] = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowerCamelCase__ : Any = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowerCamelCase__ : int = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowerCamelCase__ : Optional[int] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowerCamelCase__ : Union[str, Any] = tf.placeholder('float64' , [dim] )
lowerCamelCase__ : Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase , _lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowerCamelCase__ : Any = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowerCamelCase__ : Union[str, Any] = tf.placeholder('int32' )
lowerCamelCase__ : Dict = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase , _lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowerCamelCase__ : Optional[Any] = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowerCamelCase__ : List[Any] = tf.reduce_mean(_lowerCamelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowerCamelCase__ : Tuple = tf.placeholder('float' , [dim] )
lowerCamelCase__ : Union[str, Any] = tf.placeholder('float' , [dim] )
lowerCamelCase__ : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase , _lowerCamelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowerCamelCase__ : Union[str, Any] = tf.placeholder('float' , [noofclusters] )
lowerCamelCase__ : Optional[Any] = tf.argmin(_lowerCamelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowerCamelCase__ : Any = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowerCamelCase__ : Tuple = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
lowerCamelCase__ : Union[str, Any] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
# cluster assignment node.
lowerCamelCase__ : Dict = [
sess.run(_lowerCamelCase , feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowerCamelCase__ : List[str] = sess.run(
_lowerCamelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
lowerCamelCase__ : Any = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowerCamelCase__ : int = sess.run(
_lowerCamelCase , feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowerCamelCase__ : Tuple = sess.run(_lowerCamelCase )
lowerCamelCase__ : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
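# --- Minimal invocation sketch (hedged): this file targets the legacy
# pre-1.0 TensorFlow graph API (tf.Session, tf.assign, tf.sub), so treat the
# example as an illustration rather than runnable TF 2.x code. ---
def _demo_kmeans():
    points = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 9.5]])
    centroids, assignments = lowerCamelCase_(points, 2)
    # Expect two clusters: {0, 1} near (1.25, 1.5) and {2, 3} near (8.5, 8.75).
    print(centroids, assignments)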
| 702 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
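# --- Why the ids in the tests above look like UTF-8 bytes (illustrative) ---
# The Perceiver tokenizer is byte-level with an offset of 6 for its special
# tokens ([PAD], [BOS], [EOS], [MASK], [CLS], [SEP]).
def _demo_perceiver_byte_ids():
    euro = '€'.encode('utf-8')        # b'\xe2\x82\xac' -> (226, 130, 172)
    ids = [b + 6 for b in euro]       # -> [232, 136, 178], as asserted above
    assert ids == [2_3_2, 1_3_6, 1_7_8]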
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
if attention_mask is None:
lowerCamelCase__ : Union[str, Any] = tf.cast(tf.math.not_equal(_lowerCamelCase , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class a_ :
'''simple docstring'''
lowerCamelCase__ : Tuple = OPTConfig
lowerCamelCase__ : Any = {}
lowerCamelCase__ : Union[str, Any] = 'gelu'
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=9_9, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=4, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=2_0, lowerCamelCase_=2, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=1_6, lowerCamelCase_=1_6, ):
'''simple docstring'''
lowerCamelCase__ : int = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : Optional[int] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[int] = use_labels
lowerCamelCase__ : int = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : str = num_attention_heads
lowerCamelCase__ : Optional[Any] = intermediate_size
lowerCamelCase__ : Dict = hidden_act
lowerCamelCase__ : List[str] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = eos_token_id
lowerCamelCase__ : Optional[Any] = pad_token_id
lowerCamelCase__ : str = bos_token_id
lowerCamelCase__ : int = embed_dim
lowerCamelCase__ : List[str] = word_embed_proj_dim
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
lowerCamelCase__ : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
lowerCamelCase__ : Tuple = tf.concat([input_ids, eos_tensor], axis=1 )
lowerCamelCase__ : str = self.config_cls(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=lowerCamelCase_, **self.config_updates, )
lowerCamelCase__ : List[str] = prepare_opt_inputs_dict(lowerCamelCase_, lowerCamelCase_ )
return config, inputs_dict
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = TFOPTModel(config=lowerCamelCase_ )
lowerCamelCase__ : str = inputs_dict['input_ids']
lowerCamelCase__ : int = input_ids[:1, :]
lowerCamelCase__ : List[str] = inputs_dict['attention_mask'][:1, :]
lowerCamelCase__ : Dict = 1
# first forward pass
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__ : List[str] = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : Dict = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
lowerCamelCase__ : Dict = tf.concat([input_ids, next_tokens], axis=-1 )
lowerCamelCase__ : List[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
lowerCamelCase__ : Union[str, Any] = int(ids_tensor((1,), output_from_past.shape[-1] ) )
lowerCamelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase__ : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase_, lowerCamelCase_, rtol=1e-3 )
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCamelCase__ : Union[str, Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCamelCase__ : Optional[int] = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Optional[Any] = 10
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TFOPTModelTester(self )
lowerCamelCase__ : Tuple = ConfigTester(self, config_class=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCamelCase_, lowerCamelCase_ ):
if hasattr(lowerCamelCase_, 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowerCamelCase_, 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCamelCase__ : Optional[int] = model_class(config=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = _get_word_embedding_weight(lowerCamelCase_, model.get_input_embeddings() )
lowerCamelCase__ : Union[str, Any] = _get_word_embedding_weight(lowerCamelCase_, model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _get_word_embedding_weight(lowerCamelCase_, model.get_input_embeddings() )
lowerCamelCase__ : str = _get_word_embedding_weight(lowerCamelCase_, model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCamelCase__ : Dict = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], lowerCamelCase_ )
# check that weights remain the same after resizing
lowerCamelCase__ : Union[str, Any] = True
for pa, pa in zip(old_input_embeddings.value(), new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase__ : List[Any] = False
self.assertTrue(lowerCamelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], lowerCamelCase_ )
lowerCamelCase__ : str = True
for pa, pa in zip(old_output_embeddings.value(), new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase__ : Tuple = False
self.assertTrue(lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return tf.constant(_lowerCamelCase , dtype=tf.intaa )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 99
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = tf.ones((4, 1), dtype=tf.intaa ) * 2
lowerCamelCase__ : Optional[int] = tf.concat([ids_tensor((4, 6), self.vocab_size - 3 ) + 3, eos_column_vector], axis=1 )
lowerCamelCase__ : str = input_ids.shape[0]
lowerCamelCase__ : List[str] = OPTConfig(
vocab_size=self.vocab_size, hidden_size=2_4, num_hidden_layers=2, num_attention_heads=2, ffn_dim=3_2, max_position_embeddings=4_8, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFOPTModel.from_pretrained('facebook/opt-350m' )
lowerCamelCase__ : Dict = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCamelCase__ : str = tf.not_equal(lowerCamelCase_, model.config.pad_token_id )
with tf.GradientTape():
lowerCamelCase__ : List[str] = model(input_ids=lowerCamelCase_, attention_mask=lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[Any] = (1, 1_1, 5_1_2)
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3], lowerCamelCase_, atol=4e-3 ) )
lowerCamelCase__ : str = tf.function(lowerCamelCase_, jit_compile=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = xla_generate(lowerCamelCase_, lowerCamelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3], lowerCamelCase_, atol=4e-2 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : Dict = 'facebook/opt-350m'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCamelCase__ : Dict = GPTaTokenizer.from_pretrained(self.path_model )
lowerCamelCase__ : Tuple = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCamelCase__ : Dict = tokenizer(lowerCamelCase_, return_tensors='tf', padding=lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask )[0], axis=-1 )
lowerCamelCase__ : Any = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-4 ) )
lowerCamelCase__ : int = tf.function(lowerCamelCase_, jit_compile=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask )[0], axis=-1 )
self.assertTrue(np.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-4 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = 'facebook/opt-125m'
lowerCamelCase__ : str = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[Any] = GPTaTokenizer.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Dict = TFOPTForCausalLM.from_pretrained(lowerCamelCase_ )
for prompt in self.prompts:
lowerCamelCase__ : int = tokenizer(lowerCamelCase_, return_tensors='tf' ).input_ids
lowerCamelCase__ : Union[str, Any] = model.generate(lowerCamelCase_, max_length=1_0 )
lowerCamelCase__ : List[Any] = tokenizer.batch_decode(lowerCamelCase_, skip_special_tokens=lowerCamelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = 'facebook/opt-350m'
lowerCamelCase__ : Dict = GPTaTokenizer.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = TFOPTForCausalLM.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 'left'
# use different length sentences to test batching
lowerCamelCase__ : Union[str, Any] = [
'Hello, my dog is a little',
'Today, I',
]
lowerCamelCase__ : List[str] = tokenizer(lowerCamelCase_, return_tensors='tf', padding=lowerCamelCase_ )
lowerCamelCase__ : List[str] = inputs['input_ids']
lowerCamelCase__ : Optional[int] = model.generate(input_ids=lowerCamelCase_, attention_mask=inputs['attention_mask'] )
lowerCamelCase__ : List[str] = tokenizer(sentences[0], return_tensors='tf' ).input_ids
lowerCamelCase__ : Optional[Any] = model.generate(input_ids=lowerCamelCase_ )
lowerCamelCase__ : Any = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1], tf.intaa ) )
lowerCamelCase__ : List[str] = tokenizer(sentences[1], return_tensors='tf' ).input_ids
lowerCamelCase__ : Union[str, Any] = model.generate(input_ids=lowerCamelCase_, max_length=model.config.max_length - num_paddings )
lowerCamelCase__ : List[Any] = tokenizer.batch_decode(lowerCamelCase_, skip_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.decode(output_non_padded[0], skip_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Tuple = tokenizer.decode(output_padded[0], skip_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, [non_padded_sentence, padded_sentence] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'facebook/opt-350m'
lowerCamelCase__ : Union[str, Any] = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = GPTaTokenizer.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowerCamelCase_ )
for prompt in self.prompts:
lowerCamelCase__ : Tuple = tokenizer(lowerCamelCase_, return_tensors='tf' ).input_ids
lowerCamelCase__ : int = model.generate(lowerCamelCase_, max_length=1_0 )
lowerCamelCase__ : List[str] = tokenizer.batch_decode(lowerCamelCase_, skip_special_tokens=lowerCamelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
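# --- Why the 'left' padding side is set in the batching test above ---
# Decoder-only models such as OPT must be left-padded for batched generation:
# with right padding, pad tokens would sit between the prompt and the newly
# generated tokens. A hedged sketch against the public 'facebook/opt-350m'
# checkpoint:
def _demo_left_padding():
    tokenizer = GPTaTokenizer.from_pretrained('facebook/opt-350m')
    tokenizer.padding_side = 'left'
    batch = tokenizer(['Hello, my dog is a little', 'Today, I'], return_tensors='tf', padding=True)
    # Every prompt now ends at the same position, flush against the generation boundary.
    print(batch['input_ids'].shape)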
| 703 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
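    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)), with s the semi-perimeter.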
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or '
            'equal to three as number of sides' )
elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as '
            'length of a side' )
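    # A regular n-gon with side length s has area n * s**2 / (4 * tan(pi / n)).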
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
A_ : Any = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
A_ : Union[str, Any] = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
A_ : Optional[int] = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ), reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'], )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
if return_pvalue:
lowerCamelCase__ : List[Any] = pearsonr(lowerCamelCase_, lowerCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_, lowerCamelCase_ )[0] )}
| 704 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
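# RoPE-scaling sketch: the parameterized test above sets `rope_scaling` on the
# config before re-instantiating the model (assumes a transformers version in
# which GPTNeoXConfig supports this field; sizes here are illustrative):
#
#     config = GPTNeoXConfig(hidden_size=64, num_attention_heads=8)
#     config.rope_scaling = {"type": "linear", "factor": 10.0}  # or "dynamic"
#     scaled_model = GPTNeoXModel(config)
#
# With "linear" scaling, even short inputs diverge from the unscaled model;
# with "dynamic" scaling, RoPE only changes once the input grows beyond the
# original max_position_embeddings, so short inputs should match exactly.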
| 696 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
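# Data-parallel sketch: every test above repeats the same pmap setup. A
# hypothetical helper distilling it (name and argument order are illustrative):
def _shard_for_pmap(params, prompt_ids, rng):
    params = replicate(params)  # copy the weights to every local device
    rngs = jax.random.split(rng, jax.device_count())  # one PRNG key per device
    prompt_ids = shard(prompt_ids)  # split the batch along the device axis
    return params, prompt_ids, rngs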
| 705 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
            lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
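# Regex sketch: `_re_checkpoint` captures (name, link) pairs from docstring
# markdown, e.g.
#     _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#     -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]
# which is what the loop above unpacks into `ckpt_name` and `ckpt_link`.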
| 696 | 0 |
"""simple docstring"""
from __future__ import annotations
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = text, pattern
lowerCamelCase__ : List[Any] = len(lowerCamelCase_ ), len(lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
for i in range(self.patLen - 1, -1, -1 ):
if char == self.pattern[i]:
return i
return -1
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
for i in range(self.patLen - 1, -1, -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = []
for i in range(self.textLen - self.patLen + 1 ):
lowerCamelCase__ : Tuple = self.mismatch_in_text(lowerCamelCase_ )
if mismatch_index == -1:
positions.append(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = self.match_in_pattern(self.text[mismatch_index] )
lowerCamelCase__ : List[Any] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
A_ : Dict = "ABAABA"
A_ : Any = "AB"
A_ : Optional[int] = BoyerMooreSearch(text, pattern)
A_ : Union[str, Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
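# Cross-check sketch: a naive quadratic scan must report the same match
# positions as the bad-character heuristic above ([0, 3] for "AB" in "ABAABA").
def naive_positions(text: str, pattern: str) -> list[int]:
    width = len(pattern)
    return [i for i in range(len(text) - width + 1) if text[i : i + width] == pattern]

assert naive_positions(text, pattern) == sorted(positions)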
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
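# Behavior sketch: with the _LazyModule installed as this package's module,
# `from transformers.models.llama import LlamaConfig` resolves the submodule
# only on first attribute access, so merely importing the package never pulls
# in torch / sentencepiece / tokenizers; the try/except blocks above only
# decide which names get registered in `_import_structure`.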
| 696 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
A_ : Optional[int] = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.', lowerCamelCase_, )
super().__init__(*lowerCamelCase_, **lowerCamelCase_ )
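# The shim above follows the standard transformers deprecation pattern; a
# hypothetical minimal form (upstream passes FutureWarning as the category):
#
#     class DeprecatedExtractor(PerceiverImageProcessor):
#         def __init__(self, *args, **kwargs):
#             warnings.warn("use PerceiverImageProcessor instead", FutureWarning)
#             super().__init__(*args, **kwargs)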
| 707 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"User-Agent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
A_ : List[Any] = logging.get_logger(__name__)
A_ : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A_ : List[Any] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
A_ : Dict = {"mobilebert-uncased": 5_12}
A_ : int = {}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : int = MobileBertTokenizer
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=True, lowerCamelCase_="[UNK]", lowerCamelCase_="[SEP]", lowerCamelCase_="[PAD]", lowerCamelCase_="[CLS]", lowerCamelCase_="[MASK]", lowerCamelCase_=True, lowerCamelCase_=None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
lowerCamelCase_, tokenizer_file=lowerCamelCase_, do_lower_case=lowerCamelCase_, unk_token=lowerCamelCase_, sep_token=lowerCamelCase_, pad_token=lowerCamelCase_, cls_token=lowerCamelCase_, mask_token=lowerCamelCase_, tokenize_chinese_chars=lowerCamelCase_, strip_accents=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase', lowerCamelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents', lowerCamelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars', lowerCamelCase_ ) != tokenize_chinese_chars
):
lowerCamelCase__ : Optional[Any] = getattr(lowerCamelCase_, normalizer_state.pop('type' ) )
lowerCamelCase__ : Optional[int] = do_lower_case
lowerCamelCase__ : Tuple = strip_accents
lowerCamelCase__ : str = tokenize_chinese_chars
lowerCamelCase__ : int = normalizer_class(**lowerCamelCase_ )
lowerCamelCase__ : str = do_lower_case
def a__ (self, lowerCamelCase_, lowerCamelCase_=None ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Dict = [self.sep_token_id]
lowerCamelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self._tokenizer.model.save(lowerCamelCase_, name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
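# Special-token layout sketch, matching the two methods above:
#
#     single sequence: [CLS] A A A [SEP]         -> token_type_ids 0 0 0 0 0
#     sequence pair:   [CLS] A A [SEP] B B [SEP] -> token_type_ids 0 0 0 0 1 1 1
#
# save_vocabulary simply delegates to the backend tokenizer's model.save.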
| 708 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 696 | 0 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = DownBlockaD # noqa F405
lowerCamelCase__ : Tuple = 'down'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = ResnetDownsampleBlockaD # noqa F405
lowerCamelCase__ : int = 'down'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AttnDownBlockaD # noqa F405
lowerCamelCase__ : Optional[int] = 'down'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = CrossAttnDownBlockaD # noqa F405
lowerCamelCase__ : List[str] = 'down'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : Dict = 3_2
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = SimpleCrossAttnDownBlockaD # noqa F405
lowerCamelCase__ : Optional[int] = 'down'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : List[str] = 3_2
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps', 'MPS result is not consistent' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = SkipDownBlockaD # noqa F405
lowerCamelCase__ : List[str] = 'down'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = AttnSkipDownBlockaD # noqa F405
lowerCamelCase__ : List[str] = 'down'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = DownEncoderBlockaD # noqa F405
lowerCamelCase__ : Optional[int] = 'down'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = {
'in_channels': 3_2,
'out_channels': 3_2,
}
lowerCamelCase__ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = AttnDownEncoderBlockaD # noqa F405
lowerCamelCase__ : Any = 'down'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = {
'in_channels': 3_2,
'out_channels': 3_2,
}
lowerCamelCase__ : int = self.dummy_input
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = UNetMidBlockaD # noqa F405
lowerCamelCase__ : str = 'mid'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = {
'in_channels': 3_2,
'temb_channels': 1_2_8,
}
lowerCamelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = UNetMidBlockaDCrossAttn # noqa F405
lowerCamelCase__ : List[str] = 'mid'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : Dict = 3_2
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCamelCase__ : Optional[Any] = 'mid'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : List[str] = 3_2
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = UpBlockaD # noqa F405
lowerCamelCase__ : Optional[Any] = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = ResnetUpsampleBlockaD # noqa F405
lowerCamelCase__ : str = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = CrossAttnUpBlockaD # noqa F405
lowerCamelCase__ : List[str] = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : Tuple = 3_2
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = SimpleCrossAttnUpBlockaD # noqa F405
lowerCamelCase__ : Optional[Any] = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_, include_encoder_hidden_states=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = super().prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : List[Any] = 3_2
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = AttnUpBlockaD # noqa F405
lowerCamelCase__ : Any = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
@unittest.skipIf(torch_device == 'mps', 'MPS result is not consistent' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = SkipUpBlockaD # noqa F405
lowerCamelCase__ : Union[str, Any] = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = AttnSkipUpBlockaD # noqa F405
lowerCamelCase__ : Any = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = UpDecoderBlockaD # noqa F405
lowerCamelCase__ : Dict = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = {'in_channels': 3_2, 'out_channels': 3_2}
lowerCamelCase__ : List[str] = self.dummy_input
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
super().test_output(lowerCamelCase_ )
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = AttnUpDecoderBlockaD # noqa F405
lowerCamelCase__ : Any = 'up'
@property
def a__ (self ):
'''simple docstring'''
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = {'in_channels': 3_2, 'out_channels': 3_2}
lowerCamelCase__ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
super().test_output(lowerCamelCase_ )
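# Hedged sketch of the shared `test_output` helper that the block test cases
# above delegate to; the mixin itself lives elsewhere in the test suite, so the
# exact slicing below is an assumption inferred from the nine-value expected
# slices used throughout:
#
#     output = block(**inputs_dict)
#     output = output[0] if isinstance(output, tuple) else output
#     output_slice = output[0, -1, -3:, -3:].detach().cpu().flatten()
#     assert torch.allclose(output_slice, torch.tensor(expected_slice), atol=5e-3)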
| 709 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
| 696 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError('check_bouncy() accepts only integer arguments' )
lowerCamelCase__ : List[Any] = str(_lowerCamelCase )
lowerCamelCase__ : str = ''.join(sorted(_lowerCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def lowerCamelCase_ ( _lowerCamelCase = 99 ):
if not 0 < percent < 100:
raise ValueError('solution() only accepts values from 0 to 100' )
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = 1
while True:
if check_bouncy(_lowerCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(99)}")
| 710 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
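# For reference, `generate_identified_filename` as exercised above only splices
# an identifier into the file stem; a roughly equivalent hedged sketch:
#
#     from pathlib import Path
#
#     def generate_identified_filename(filename: Path, identifier: str) -> Path:
#         return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)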
| 696 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
| 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uint8(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16 )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16 )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
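# For reference, an `assert_mean_pixel_difference`-style check of the kind used
# above bounds the mean absolute difference between two uint8 images; a hedged
# sketch of the idea, not the exact diffusers helper:
#
#     import numpy as np
#
#     def assert_mean_pixel_difference(image, expected_image, tolerance=10):
#         diff = np.abs(np.asarray(image, dtype=np.float64) - np.asarray(expected_image, dtype=np.float64))
#         assert diff.mean() < tolerance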
| 696 | 0 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    '''simple docstring'''
    features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files, (str, list, tuple) ):
            files = data_files
            if isinstance(files, str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files} ) )
        return splits
    def _cast_table(self, pa_table ):
        '''simple docstring'''
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema )
        return pa_table
    def _generate_tables(self, files ):
        '''simple docstring'''
        for i, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file, 'rb' ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
                yield i, self._cast_table(pa_table )
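# Hedged usage sketch for the builder above: the packaged "pandas" loader reads
# pickled DataFrames, so a round trip looks roughly like this ("data.pkl" is a
# hypothetical path):
#
#     import pandas as pd
#     from datasets import load_dataset
#
#     pd.DataFrame({"a": [1, 2, 3]}).to_pickle("data.pkl")
#     ds = load_dataset("pandas", data_files="data.pkl", split="train")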
| 712 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
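# Hedged sketch of how a pin table like `A_` above is typically consumed: look
# up bare package names and collect the fully pinned specifiers (the helper
# name is hypothetical):
#
#     def deps_list(*pkgs):
#         return [A_[pkg] for pkg in pkgs]
#
#     assert deps_list("numpy", "torch") == ["numpy>=1.17", "torch>=1.9,!=1.12.0"]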
| 696 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Optional[int] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'megatron-bert'
    def __init__(self, vocab_size=2_9_0_5_6, hidden_size=1_0_2_4, num_hidden_layers=2_4, num_attention_heads=1_6, intermediate_size=4_0_9_6, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
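# Minimal usage sketch for the config above (in the transformers library the
# class is exported as MegatronBertConfig):
#
#     config = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=2)
#     assert config.model_type == "megatron-bert"
#     assert config.use_cache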
| 713 |
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    '''simple docstring'''
    def __init__(self, group = 1_4 ):
        '''simple docstring'''
        if group not in primes:
            raise ValueError('Unsupported Group' )
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(3_2 ) ), base=1_6 )
    def get_private_key(self ):
        '''simple docstring'''
        return hex(self.__private_key )[2:]
    def generate_public_key(self ):
        '''simple docstring'''
        public_key = pow(self.generator, self.__private_key, self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key(self, key ):
        '''simple docstring'''
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime ) == 1
        )
    def generate_shared_key(self, other_key_str ):
        '''simple docstring'''
        other_key = int(other_key_str, base=1_6 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('Invalid public key' )
        shared_key = pow(other_key, self.__private_key, self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str, prime ):
        '''simple docstring'''
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str, remote_public_key_str, group = 1_4 ):
        '''simple docstring'''
        local_private_key = int(local_private_key_str, base=1_6 )
        remote_public_key = int(remote_public_key_str, base=1_6 )
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime ):
            raise ValueError('Invalid public key' )
        shared_key = pow(remote_public_key, local_private_key, prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
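# Hedged sketch of a full exchange with the class above: both sides derive the
# same secret because pow(pow(g, a, p), b, p) == pow(pow(g, b, p), a, p):
#
#     alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#     alice_key = alice.generate_shared_key(bob.generate_public_key())
#     bob_key = bob.generate_shared_key(alice.generate_public_key())
#     assert alice_key == bob_key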
| 696 | 0 |
"""simple docstring"""
from typing import Any
import numpy as np
def lowerCamelCase_ ( _lowerCamelCase ):
return np.array_equal(_lowerCamelCase , matrix.conjugate().T )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = v.conjugate().T
lowerCamelCase__ : Optional[Any] = v_star.dot(_lowerCamelCase )
assert isinstance(_lowerCamelCase , np.ndarray )
return (v_star_dot.dot(_lowerCamelCase )) / (v_star.dot(_lowerCamelCase ))
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[str] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
lowerCamelCase__ : int = np.array([[1], [2], [3]] )
assert is_hermitian(_lowerCamelCase ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ : Any = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_lowerCamelCase ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(_lowerCamelCase , _lowerCamelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
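# A hedged numerical check of the classical bound: for a Hermitian matrix the
# Rayleigh quotient lies between the smallest and largest eigenvalue.
#
#     a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]])
#     v = np.array([[1], [2], [3]])
#     eigenvalues = np.linalg.eigvalsh(a)
#     assert eigenvalues[0] - 1e-9 <= rayleigh_quotient(a, v).real.item() <= eigenvalues[-1] + 1e-9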
| 714 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
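# Worked example for the formula above: a 10 kg body moving at 5 m/s carries
# 0.5 * 10 * 5**2 = 125.0 J, and the sign of the velocity does not matter:
#
#     >>> kinetic_energy(10, 5)
#     125.0
#     >>> kinetic_energy(10, -5)
#     125.0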
| 696 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
A_ : Dict = {f"funnel-transformer/{name}": 5_12 for name in _model_names}
A_ : str = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase', do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self, save_directory, filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
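# Hedged usage sketch for the tokenizer above (exported from the transformers
# library as FunnelTokenizerFast); note that the leading <cls> token receives
# the special token type id 2 configured on the class:
#
#     tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#     encoded = tokenizer("Hello world", "Second segment")
#     assert encoded["token_type_ids"][0] == 2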
| 715 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
def a__ (self ):
'''simple docstring'''
        # This repo has two configuration files: one for v4.0.0 and above with hidden size 2, one for
        # versions below with hidden size 768.
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module where it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
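# Illustrative usage sketch (not part of the test file above): `update_from_string`
# parses a comma-separated "key=value" string and casts each value to the type of the
# attribute it overwrites, which is what the first test above exercises.
if __name__ == "__main__":
    from transformers import GPT2Config

    demo_config = GPT2Config()
    demo_config.update_from_string("n_embd=1024,resid_pdrop=0.2")
    assert demo_config.n_embd == 1024
    assert demo_config.resid_pdrop == 0.2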
| 696 | 0 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
A_ : Union[str, Any] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
A_ : List[str] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
A_ : Union[str, Any] = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ), reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
], )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
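# Minimal usage sketch (illustrative): load the metric through `datasets` and compute
# MCC on the toy inputs from the docstring examples above.
if __name__ == "__main__":
    matthews_metric = datasets.load_metric("matthews_correlation")
    results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], predictions=[1, 2, 2, 0, 3, 3])
    print(round(results["matthews_correlation"], 2))  # 0.54 per the docstring example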
| 716 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    """
    Pipeline for sampling actions from a diffusion model trained on environment trajectories,
    guided at each denoising step by the gradient of a learned value function.
    """

    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                # zero out the guidance gradient for the final low-noise timesteps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)['prev_sample']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, 'observations')
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x, conditions, self.action_dim)
        x = self.to_torch(x)
        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key='actions')
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
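# Rough usage sketch (the environment setup and checkpoint id below are illustrative
# assumptions, not guaranteed by this file): the pipeline expects a D4RL-style `env`
# plus value-function, UNet, and scheduler components, which
# `DiffusionPipeline.from_pretrained` can load together from a hub checkpoint:
#
#     import gym
#
#     env = gym.make("hopper-medium-v2")  # requires d4rl-style environments
#     pipeline = DiffusionPipeline.from_pretrained(
#         "bglick13/hopper-medium-v2-value-function-hor32",  # illustrative checkpoint id
#         env=env,
#     )
#     obs = env.reset()
#     denorm_action = pipeline(obs, planning_horizon=32)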
| 696 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines: `frames` holds the denoised video frames,
    either as a list of NumPy arrays of shape (height, width, num_channels) or as a torch tensor.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
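# Rough usage sketch (illustrative checkpoint id; requires the torch and transformers
# extras checked by the availability guard above):
#
#     import torch
#
#     pipe = TextToVideoSDPipeline.from_pretrained(
#         "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
#     )
#     frames = pipe("a panda playing guitar", num_frames=16).frames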
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f'{round(-1 * my_fir_sum):.1f}')
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha0 in my_alphas:
        for cha1 in my_alphas:
            sequence = cha0 + cha1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f'{round(-1 * my_sec_sum):.1f}')
    # print the difference between them
    print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}')
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text input into two dicts of counts: single-character frequencies
    and two-character (bigram) frequencies.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[' ' + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
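# Illustrative run: `calculate_prob` prints the first-order entropy, the second-order
# entropy, and the difference between them (exact values depend on the input text):
#
#     calculate_prob("the quick brown fox jumps over the lazy dog")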
| 696 | 0 |
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]', str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return ''.join(
        [''.join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, '_')


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, '-')
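# Illustrative conversions (the four public function names above are reconstructed):
#
#     to_pascal_case("one two 31235three4four")       -> 'OneTwo31235three4four'
#     to_camel_case("one two 31235three4four")        -> 'oneTwo31235three4four'
#     to_snake_case("one two 31235three4four", True)  -> 'ONE_TWO_31235THREE4FOUR'
#     to_kebab_case("one two 31235three4four", False) -> 'one-two-31235three4four'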
if __name__ == "__main__":
__import__("doctest").testmod()
| 718 |
"""simple docstring"""
import os
def solution() -> int:
    """
    Returns the total of all the name scores in the file p022_names.txt, where a
    name's score is the sum of the alphabetical values of its letters multiplied
    by the name's position in the sorted list.
    """
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
        names = names.replace('"', '').split(',')

    names.sort()

    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
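# Worked example from the Project Euler problem statement: COLIN is worth
# 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in alphabetical order it
# scores 938 * 53 = 49714.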
if __name__ == "__main__":
print(solution())
| 696 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=2_2_4, lowerCamelCase_=1_0_0_0, lowerCamelCase_=[3, 3, 6, 4], lowerCamelCase_=[4_8, 5_6, 1_1_2, 2_2_0], ):
'''simple docstring'''
lowerCamelCase__ : Any = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : List[str] = num_channels
lowerCamelCase__ : Any = is_training
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Any = image_size
lowerCamelCase__ : List[Any] = layer_depths
lowerCamelCase__ : Dict = embed_dims
def a__ (self ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act='gelu', num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=lowerCamelCase_, layer_scale_init_value=1e-5, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def a__ (self ):
'''simple docstring'''
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase__ : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12)
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
def a__ (self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def a__ (self ):
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason='SwiftFormer does not output attentions' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def a__ (self ):
'''simple docstring'''
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def a__ (self ):
'''simple docstring'''
pass
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None
@slow
def a__ (self ):
'''simple docstring'''
        model = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
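# Rough inference sketch mirroring the integration test above (checkpoint id taken
# from the test; the image path is the repository test fixture):
#
#     processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
#     model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     logits = model(**processor(images=image, return_tensors="pt")).logits
#     print(logits.argmax(-1).item())  # predicted ImageNet class index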
| 719 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """
    Constructs a Speech2Text processor which wraps a Speech2Text feature extractor
    and a Speech2Text tokenizer into a single processor.
    """

    feature_extractor_class = 'Speech2TextFeatureExtractor'
    tokenizer_class = 'Speech2TextTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility: inside `as_target_processor`, forward everything
        # to the current processor (the tokenizer).
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call).')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
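# Rough usage sketch (illustrative checkpoint id): with both `audio` and `text`
# supplied, `__call__` above returns the extracted input features together with a
# `labels` key holding the tokenized text.
if __name__ == "__main__":
    import numpy as np

    from transformers import Speech2TextProcessor

    processor = Speech2TextProcessor.from_pretrained(
        "facebook/s2t-small-librispeech-asr"  # illustrative checkpoint id
    )
    speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = processor(audio=speech, sampling_rate=16_000, text="hello world")
    print(sorted(inputs.keys()))  # feature-extractor outputs plus 'labels'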
| 696 | 0 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
| 720 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
return inputs_dict
def a__ (self ):
'''simple docstring'''
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased').to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, so it's not a good idea to measure the error with plain addition.
        # Here, we instead divide the expected result by the actual result to obtain a ratio of ~1, and check that
        # the ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
self.assertTrue(lower_bound and upper_bound )
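# The ratio-based tolerance above can be read as a one-liner (illustrative):
#
#     ratio = expected_slice / output[..., :3, :3]
#     assert ((1 - TOLERANCE) <= ratio).all() and (ratio <= (1 + TOLERANCE)).all()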
| 696 | 0 |